This source file includes the following definitions:
- batadv_choose_claim
- batadv_choose_backbone_gw
- batadv_compare_backbone_gw
- batadv_compare_claim
- batadv_backbone_gw_release
- batadv_backbone_gw_put
- batadv_claim_release
- batadv_claim_put
- batadv_claim_hash_find
- batadv_backbone_hash_find
- batadv_bla_del_backbone_claims
- batadv_bla_send_claim
- batadv_bla_loopdetect_report
- batadv_bla_get_backbone_gw
- batadv_bla_update_own_backbone_gw
- batadv_bla_answer_request
- batadv_bla_send_request
- batadv_bla_send_announce
- batadv_bla_add_claim
- batadv_bla_claim_get_backbone_gw
- batadv_bla_del_claim
- batadv_handle_announce
- batadv_handle_request
- batadv_handle_unclaim
- batadv_handle_claim
- batadv_check_claim_group
- batadv_bla_process_claim
- batadv_bla_purge_backbone_gw
- batadv_bla_purge_claims
- batadv_bla_update_orig_address
- batadv_bla_send_loopdetect
- batadv_bla_status_update
- batadv_bla_periodic_work
- batadv_bla_init
- batadv_bla_check_bcast_duplist
- batadv_bla_is_backbone_gw_orig
- batadv_bla_is_backbone_gw
- batadv_bla_free
- batadv_bla_loopdetect_check
- batadv_bla_rx
- batadv_bla_tx
- batadv_bla_claim_table_seq_print_text
- batadv_bla_claim_dump_entry
- batadv_bla_claim_dump_bucket
- batadv_bla_claim_dump
- batadv_bla_backbone_table_seq_print_text
- batadv_bla_backbone_dump_entry
- batadv_bla_backbone_dump_bucket
- batadv_bla_backbone_dump
- batadv_bla_check_claim
   1 
   2 
   3 
   4 
   5 
   6 
   7 #include "bridge_loop_avoidance.h"
   8 #include "main.h"
   9 
  10 #include <linux/atomic.h>
  11 #include <linux/byteorder/generic.h>
  12 #include <linux/compiler.h>
  13 #include <linux/crc16.h>
  14 #include <linux/errno.h>
  15 #include <linux/etherdevice.h>
  16 #include <linux/gfp.h>
  17 #include <linux/if_arp.h>
  18 #include <linux/if_ether.h>
  19 #include <linux/if_vlan.h>
  20 #include <linux/jhash.h>
  21 #include <linux/jiffies.h>
  22 #include <linux/kernel.h>
  23 #include <linux/kref.h>
  24 #include <linux/list.h>
  25 #include <linux/lockdep.h>
  26 #include <linux/netdevice.h>
  27 #include <linux/netlink.h>
  28 #include <linux/rculist.h>
  29 #include <linux/rcupdate.h>
  30 #include <linux/seq_file.h>
  31 #include <linux/skbuff.h>
  32 #include <linux/slab.h>
  33 #include <linux/spinlock.h>
  34 #include <linux/stddef.h>
  35 #include <linux/string.h>
  36 #include <linux/workqueue.h>
  37 #include <net/arp.h>
  38 #include <net/genetlink.h>
  39 #include <net/netlink.h>
  40 #include <net/sock.h>
  41 #include <uapi/linux/batadv_packet.h>
  42 #include <uapi/linux/batman_adv.h>
  43 
  44 #include "hard-interface.h"
  45 #include "hash.h"
  46 #include "log.h"
  47 #include "netlink.h"
  48 #include "originator.h"
  49 #include "soft-interface.h"
  50 #include "translation-table.h"
  51 
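      /* first 4 bytes of the special source MAC used for ANNOUNCE claim
       * frames; batadv_bla_send_announce() appends the announcing backbone
       * gateway's 16 bit claim CRC as bytes 5 and 6
       */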
  52 static const u8 batadv_announce_mac[4] = {0x43, 0x05, 0x43, 0x05};
  53 
  54 static void batadv_bla_periodic_work(struct work_struct *work);
  55 static void
  56 batadv_bla_send_announce(struct batadv_priv *bat_priv,
  57                          struct batadv_bla_backbone_gw *backbone_gw);
  58 
   59 /**
   60  * batadv_choose_claim() - choose the right bucket for a claim
   61  * @data: data to hash
   62  * @size: size of the hash table
   63  *
   64  * Return: the hash index of the claim
   65  */
  66 static inline u32 batadv_choose_claim(const void *data, u32 size)
  67 {
  68         struct batadv_bla_claim *claim = (struct batadv_bla_claim *)data;
  69         u32 hash = 0;
  70 
  71         hash = jhash(&claim->addr, sizeof(claim->addr), hash);
  72         hash = jhash(&claim->vid, sizeof(claim->vid), hash);
  73 
  74         return hash % size;
  75 }
  76 
   77 /**
   78  * batadv_choose_backbone_gw() - choose the right bucket for a backbone gateway
   79  * @data: data to hash
   80  * @size: size of the hash table
   81  *
   82  * Return: the hash index of the backbone gateway
   83  */
  84 static inline u32 batadv_choose_backbone_gw(const void *data, u32 size)
  85 {
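              /* note: this deliberately reuses the claim hashing; it assumes
               * that addr/vid in struct batadv_bla_claim and orig/vid in
               * struct batadv_bla_backbone_gw sit at the same offsets, so
               * both hash tables end up with the same bucket function
               */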
  86         const struct batadv_bla_claim *claim = (struct batadv_bla_claim *)data;
  87         u32 hash = 0;
  88 
  89         hash = jhash(&claim->addr, sizeof(claim->addr), hash);
  90         hash = jhash(&claim->vid, sizeof(claim->vid), hash);
  91 
  92         return hash % size;
  93 }
  94 
   95 /**
   96  * batadv_compare_backbone_gw() - compare originator and vid of two backbone gws
   97  * @node: list node of the first entry to compare
   98  * @data2: pointer to the second backbone gateway
   99  *
  100  * Return: true if both entries match, false otherwise
  101  */
 102 static bool batadv_compare_backbone_gw(const struct hlist_node *node,
 103                                        const void *data2)
 104 {
 105         const void *data1 = container_of(node, struct batadv_bla_backbone_gw,
 106                                          hash_entry);
 107         const struct batadv_bla_backbone_gw *gw1 = data1;
 108         const struct batadv_bla_backbone_gw *gw2 = data2;
 109 
 110         if (!batadv_compare_eth(gw1->orig, gw2->orig))
 111                 return false;
 112 
 113         if (gw1->vid != gw2->vid)
 114                 return false;
 115 
 116         return true;
 117 }
 118 
  119 /**
  120  * batadv_compare_claim() - compare address and vid of two claims
  121  * @node: list node of the first entry to compare
  122  * @data2: pointer to the second claim
  123  *
  124  * Return: true if both claims match, false otherwise
  125  */
 126 static bool batadv_compare_claim(const struct hlist_node *node,
 127                                  const void *data2)
 128 {
 129         const void *data1 = container_of(node, struct batadv_bla_claim,
 130                                          hash_entry);
 131         const struct batadv_bla_claim *cl1 = data1;
 132         const struct batadv_bla_claim *cl2 = data2;
 133 
 134         if (!batadv_compare_eth(cl1->addr, cl2->addr))
 135                 return false;
 136 
 137         if (cl1->vid != cl2->vid)
 138                 return false;
 139 
 140         return true;
 141 }
 142 
  143 /**
  144  * batadv_backbone_gw_release() - release the backbone gateway and queue it
  145  *  for freeing after the rcu grace period
  146  * @ref: kref pointer of the backbone gateway
  147  */
 148 static void batadv_backbone_gw_release(struct kref *ref)
 149 {
 150         struct batadv_bla_backbone_gw *backbone_gw;
 151 
 152         backbone_gw = container_of(ref, struct batadv_bla_backbone_gw,
 153                                    refcount);
 154 
 155         kfree_rcu(backbone_gw, rcu);
 156 }
 157 
  158 /**
  159  * batadv_backbone_gw_put() - decrement the backbone gateway refcounter and
  160  *  possibly release it
  161  * @backbone_gw: backbone gateway to be free'd
  162  */
 163 static void batadv_backbone_gw_put(struct batadv_bla_backbone_gw *backbone_gw)
 164 {
 165         kref_put(&backbone_gw->refcount, batadv_backbone_gw_release);
 166 }
 167 
  168 /**
  169  * batadv_claim_release() - remove the claim's CRC contribution from its
  170  *  backbone gateway and queue the claim for freeing after rcu grace period
  171  * @ref: kref pointer of the claim
  172  */
 173 static void batadv_claim_release(struct kref *ref)
 174 {
 175         struct batadv_bla_claim *claim;
 176         struct batadv_bla_backbone_gw *old_backbone_gw;
 177 
 178         claim = container_of(ref, struct batadv_bla_claim, refcount);
 179 
 180         spin_lock_bh(&claim->backbone_lock);
 181         old_backbone_gw = claim->backbone_gw;
 182         claim->backbone_gw = NULL;
 183         spin_unlock_bh(&claim->backbone_lock);
 184 
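              /* remove this claim's contribution from the backbone gateway's
               * claim checksum: the CRC is maintained as an XOR over the
               * crc16 of every claimed client address, so XORing the address
               * out again undoes the earlier addition
               */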
 185         spin_lock_bh(&old_backbone_gw->crc_lock);
 186         old_backbone_gw->crc ^= crc16(0, claim->addr, ETH_ALEN);
 187         spin_unlock_bh(&old_backbone_gw->crc_lock);
 188 
 189         batadv_backbone_gw_put(old_backbone_gw);
 190 
 191         kfree_rcu(claim, rcu);
 192 }
 193 
  194 /**
  195  * batadv_claim_put() - decrement the claim refcounter and possibly release it
  196  * @claim: claim to be free'd
  197  */
 198 static void batadv_claim_put(struct batadv_bla_claim *claim)
 199 {
 200         kref_put(&claim->refcount, batadv_claim_release);
 201 }
 202 
  203 /**
  204  * batadv_claim_hash_find() - look for a claim in the claim hash
  205  * @bat_priv: the bat priv with all the soft interface information
  206  * @data: search key (claim address and vid), may live on the stack
  207  *
  208  * Return: claim if found or NULL otherwise
  209  */
 210 static struct batadv_bla_claim *
 211 batadv_claim_hash_find(struct batadv_priv *bat_priv,
 212                        struct batadv_bla_claim *data)
 213 {
 214         struct batadv_hashtable *hash = bat_priv->bla.claim_hash;
 215         struct hlist_head *head;
 216         struct batadv_bla_claim *claim;
 217         struct batadv_bla_claim *claim_tmp = NULL;
 218         int index;
 219 
 220         if (!hash)
 221                 return NULL;
 222 
 223         index = batadv_choose_claim(data, hash->size);
 224         head = &hash->table[index];
 225 
 226         rcu_read_lock();
 227         hlist_for_each_entry_rcu(claim, head, hash_entry) {
 228                 if (!batadv_compare_claim(&claim->hash_entry, data))
 229                         continue;
 230 
 231                 if (!kref_get_unless_zero(&claim->refcount))
 232                         continue;
 233 
 234                 claim_tmp = claim;
 235                 break;
 236         }
 237         rcu_read_unlock();
 238 
 239         return claim_tmp;
 240 }
 241 
  242 /**
  243  * batadv_backbone_hash_find() - look for a backbone gateway in the hash
  244  * @bat_priv: the bat priv with all the soft interface information
  245  * @addr: the address of the originator
  246  * @vid: the VLAN ID
  247  *
  248  * Return: backbone gateway if found or NULL otherwise
  249  */
 250 static struct batadv_bla_backbone_gw *
 251 batadv_backbone_hash_find(struct batadv_priv *bat_priv, u8 *addr,
 252                           unsigned short vid)
 253 {
 254         struct batadv_hashtable *hash = bat_priv->bla.backbone_hash;
 255         struct hlist_head *head;
 256         struct batadv_bla_backbone_gw search_entry, *backbone_gw;
 257         struct batadv_bla_backbone_gw *backbone_gw_tmp = NULL;
 258         int index;
 259 
 260         if (!hash)
 261                 return NULL;
 262 
 263         ether_addr_copy(search_entry.orig, addr);
 264         search_entry.vid = vid;
 265 
 266         index = batadv_choose_backbone_gw(&search_entry, hash->size);
 267         head = &hash->table[index];
 268 
 269         rcu_read_lock();
 270         hlist_for_each_entry_rcu(backbone_gw, head, hash_entry) {
 271                 if (!batadv_compare_backbone_gw(&backbone_gw->hash_entry,
 272                                                 &search_entry))
 273                         continue;
 274 
 275                 if (!kref_get_unless_zero(&backbone_gw->refcount))
 276                         continue;
 277 
 278                 backbone_gw_tmp = backbone_gw;
 279                 break;
 280         }
 281         rcu_read_unlock();
 282 
 283         return backbone_gw_tmp;
 284 }
 285 
  286 /**
  287  * batadv_bla_del_backbone_claims() - delete all claims for a backbone gateway
  288  * @backbone_gw: backbone gateway where the claims should be removed
  289  */
 290 static void
 291 batadv_bla_del_backbone_claims(struct batadv_bla_backbone_gw *backbone_gw)
 292 {
 293         struct batadv_hashtable *hash;
 294         struct hlist_node *node_tmp;
 295         struct hlist_head *head;
 296         struct batadv_bla_claim *claim;
 297         int i;
  298         spinlock_t *list_lock; /* protects write access to the hash lists */
 299 
 300         hash = backbone_gw->bat_priv->bla.claim_hash;
 301         if (!hash)
 302                 return;
 303 
 304         for (i = 0; i < hash->size; i++) {
 305                 head = &hash->table[i];
 306                 list_lock = &hash->list_locks[i];
 307 
 308                 spin_lock_bh(list_lock);
 309                 hlist_for_each_entry_safe(claim, node_tmp,
 310                                           head, hash_entry) {
 311                         if (claim->backbone_gw != backbone_gw)
 312                                 continue;
 313 
 314                         batadv_claim_put(claim);
 315                         hlist_del_rcu(&claim->hash_entry);
 316                 }
 317                 spin_unlock_bh(list_lock);
 318         }
 319 
  320         /* all claims are gone now, reset the CRC */
 321         spin_lock_bh(&backbone_gw->crc_lock);
 322         backbone_gw->crc = BATADV_BLA_CRC_INIT;
 323         spin_unlock_bh(&backbone_gw->crc_lock);
 324 }
 325 
  326 /**
  327  * batadv_bla_send_claim() - send a claim frame according to the provided info
  328  * @bat_priv: the bat priv with all the soft interface information
  329  * @mac: the mac address to be announced within the claim
  330  * @vid: the VLAN ID of the frame
  331  * @claimtype: the type of the claim (CLAIM, UNCLAIM, ANNOUNCE, ...)
  332  */
 333 static void batadv_bla_send_claim(struct batadv_priv *bat_priv, u8 *mac,
 334                                   unsigned short vid, int claimtype)
 335 {
 336         struct sk_buff *skb;
 337         struct ethhdr *ethhdr;
 338         struct batadv_hard_iface *primary_if;
 339         struct net_device *soft_iface;
 340         u8 *hw_src;
 341         struct batadv_bla_claim_dst local_claim_dest;
 342         __be32 zeroip = 0;
 343 
 344         primary_if = batadv_primary_if_get_selected(bat_priv);
 345         if (!primary_if)
 346                 return;
 347 
 348         memcpy(&local_claim_dest, &bat_priv->bla.claim_dest,
 349                sizeof(local_claim_dest));
 350         local_claim_dest.type = claimtype;
 351 
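              /* claim frames are encoded as ARP replies with both IP
               * addresses set to zero; the ARP target hardware address
               * carries the claim group magic, the claim type and the group
               * id (struct batadv_bla_claim_dst), while the source addresses
               * are adjusted below depending on the claim type
               */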
 352         soft_iface = primary_if->soft_iface;
 353 
 354         skb = arp_create(ARPOP_REPLY, ETH_P_ARP,
 355                          
 356                          zeroip,
 357                          primary_if->soft_iface,
 358                          
 359                          zeroip,
 360                          
 361                          NULL,
 362                          
 363                          primary_if->net_dev->dev_addr,
 364                          
 365 
 366 
 367 
 368                          (u8 *)&local_claim_dest);
 369 
 370         if (!skb)
 371                 goto out;
 372 
 373         ethhdr = (struct ethhdr *)skb->data;
 374         hw_src = (u8 *)ethhdr + ETH_HLEN + sizeof(struct arphdr);
 375 
  376         /* adjust the source addresses depending on the claim type */
 377         switch (claimtype) {
 378         case BATADV_CLAIM_TYPE_CLAIM:
 379                 
 380 
 381 
 382                 ether_addr_copy(ethhdr->h_source, mac);
 383                 batadv_dbg(BATADV_DBG_BLA, bat_priv,
 384                            "%s(): CLAIM %pM on vid %d\n", __func__, mac,
 385                            batadv_print_vid(vid));
 386                 break;
 387         case BATADV_CLAIM_TYPE_UNCLAIM:
 388                 
 389 
 390 
 391                 ether_addr_copy(hw_src, mac);
 392                 batadv_dbg(BATADV_DBG_BLA, bat_priv,
 393                            "%s(): UNCLAIM %pM on vid %d\n", __func__, mac,
 394                            batadv_print_vid(vid));
 395                 break;
 396         case BATADV_CLAIM_TYPE_ANNOUNCE:
 397                 
 398 
 399 
 400                 ether_addr_copy(hw_src, mac);
 401                 batadv_dbg(BATADV_DBG_BLA, bat_priv,
 402                            "%s(): ANNOUNCE of %pM on vid %d\n", __func__,
 403                            ethhdr->h_source, batadv_print_vid(vid));
 404                 break;
 405         case BATADV_CLAIM_TYPE_REQUEST:
 406                 
 407 
 408 
 409 
 410                 ether_addr_copy(hw_src, mac);
 411                 ether_addr_copy(ethhdr->h_dest, mac);
 412                 batadv_dbg(BATADV_DBG_BLA, bat_priv,
 413                            "%s(): REQUEST of %pM to %pM on vid %d\n", __func__,
 414                            ethhdr->h_source, ethhdr->h_dest,
 415                            batadv_print_vid(vid));
 416                 break;
 417         case BATADV_CLAIM_TYPE_LOOPDETECT:
 418                 ether_addr_copy(ethhdr->h_source, mac);
 419                 batadv_dbg(BATADV_DBG_BLA, bat_priv,
 420                            "%s(): LOOPDETECT of %pM to %pM on vid %d\n",
 421                            __func__, ethhdr->h_source, ethhdr->h_dest,
 422                            batadv_print_vid(vid));
 423 
 424                 break;
 425         }
 426 
 427         if (vid & BATADV_VLAN_HAS_TAG) {
 428                 skb = vlan_insert_tag(skb, htons(ETH_P_8021Q),
 429                                       vid & VLAN_VID_MASK);
 430                 if (!skb)
 431                         goto out;
 432         }
 433 
 434         skb_reset_mac_header(skb);
 435         skb->protocol = eth_type_trans(skb, soft_iface);
 436         batadv_inc_counter(bat_priv, BATADV_CNT_RX);
 437         batadv_add_counter(bat_priv, BATADV_CNT_RX_BYTES,
 438                            skb->len + ETH_HLEN);
 439 
 440         netif_rx(skb);
 441 out:
 442         if (primary_if)
 443                 batadv_hardif_put(primary_if);
 444 }
 445 
  446 /**
  447  * batadv_bla_loopdetect_report() - worker for reporting a detected loop
  448  * @work: work queue item
  449  *
  450  * Throws a uevent, as the loopdetect check function can't do that itself
  451  * since the kernel may sleep while throwing uevents.
  452  */
 453 static void batadv_bla_loopdetect_report(struct work_struct *work)
 454 {
 455         struct batadv_bla_backbone_gw *backbone_gw;
 456         struct batadv_priv *bat_priv;
 457         char vid_str[6] = { '\0' };
 458 
 459         backbone_gw = container_of(work, struct batadv_bla_backbone_gw,
 460                                    report_work);
 461         bat_priv = backbone_gw->bat_priv;
 462 
 463         batadv_info(bat_priv->soft_iface,
 464                     "Possible loop on VLAN %d detected which can't be handled by BLA - please check your network setup!\n",
 465                     batadv_print_vid(backbone_gw->vid));
 466         snprintf(vid_str, sizeof(vid_str), "%d",
 467                  batadv_print_vid(backbone_gw->vid));
 468         vid_str[sizeof(vid_str) - 1] = 0;
 469 
 470         batadv_throw_uevent(bat_priv, BATADV_UEV_BLA, BATADV_UEV_LOOPDETECT,
 471                             vid_str);
 472 
 473         batadv_backbone_gw_put(backbone_gw);
 474 }
 475 
  476 /**
  477  * batadv_bla_get_backbone_gw() - find or create a backbone gateway
  478  * @bat_priv: the bat priv with all the soft interface information
  479  * @orig: the mac address of the originator
  480  * @vid: the VLAN ID of the frame
  481  * @own_backbone: set if the requested backbone is the local one
  482  *
  483  * Return: the (possibly created) backbone gateway or NULL on error
  484  */
 485 static struct batadv_bla_backbone_gw *
 486 batadv_bla_get_backbone_gw(struct batadv_priv *bat_priv, u8 *orig,
 487                            unsigned short vid, bool own_backbone)
 488 {
 489         struct batadv_bla_backbone_gw *entry;
 490         struct batadv_orig_node *orig_node;
 491         int hash_added;
 492 
 493         entry = batadv_backbone_hash_find(bat_priv, orig, vid);
 494 
 495         if (entry)
 496                 return entry;
 497 
 498         batadv_dbg(BATADV_DBG_BLA, bat_priv,
 499                    "%s(): not found (%pM, %d), creating new entry\n", __func__,
 500                    orig, batadv_print_vid(vid));
 501 
 502         entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
 503         if (!entry)
 504                 return NULL;
 505 
 506         entry->vid = vid;
 507         entry->lasttime = jiffies;
 508         entry->crc = BATADV_BLA_CRC_INIT;
 509         entry->bat_priv = bat_priv;
 510         spin_lock_init(&entry->crc_lock);
 511         atomic_set(&entry->request_sent, 0);
 512         atomic_set(&entry->wait_periods, 0);
 513         ether_addr_copy(entry->orig, orig);
 514         INIT_WORK(&entry->report_work, batadv_bla_loopdetect_report);
 515         kref_init(&entry->refcount);
 516 
 517         kref_get(&entry->refcount);
 518         hash_added = batadv_hash_add(bat_priv->bla.backbone_hash,
 519                                      batadv_compare_backbone_gw,
 520                                      batadv_choose_backbone_gw, entry,
 521                                      &entry->hash_entry);
 522 
 523         if (unlikely(hash_added != 0)) {
 524                 
 525                 kfree(entry);
 526                 return NULL;
 527         }
 528 
 529         
 530         orig_node = batadv_orig_hash_find(bat_priv, orig);
 531         if (orig_node) {
 532                 batadv_tt_global_del_orig(bat_priv, orig_node, vid,
 533                                           "became a backbone gateway");
 534                 batadv_orig_node_put(orig_node);
 535         }
 536 
 537         if (own_backbone) {
 538                 batadv_bla_send_announce(bat_priv, entry);
 539 
 540                 
 541                 atomic_inc(&entry->request_sent);
 542                 atomic_set(&entry->wait_periods, BATADV_BLA_WAIT_PERIODS);
 543                 atomic_inc(&bat_priv->bla.num_requests);
 544         }
 545 
 546         return entry;
 547 }
 548 
  549 /**
  550  * batadv_bla_update_own_backbone_gw() - update or add the own backbone
  551  *  gateway for the given VLAN
  552  * @bat_priv: the bat priv with all the soft interface information
  553  * @primary_if: the selected primary interface
  554  * @vid: VLAN identifier
  555  *
  556  * Makes sure we keep announcing ourselves as backbone gateway on this VLAN.
  557  */
 558 static void
 559 batadv_bla_update_own_backbone_gw(struct batadv_priv *bat_priv,
 560                                   struct batadv_hard_iface *primary_if,
 561                                   unsigned short vid)
 562 {
 563         struct batadv_bla_backbone_gw *backbone_gw;
 564 
 565         backbone_gw = batadv_bla_get_backbone_gw(bat_priv,
 566                                                  primary_if->net_dev->dev_addr,
 567                                                  vid, true);
 568         if (unlikely(!backbone_gw))
 569                 return;
 570 
 571         backbone_gw->lasttime = jiffies;
 572         batadv_backbone_gw_put(backbone_gw);
 573 }
 574 
  575 /**
  576  * batadv_bla_answer_request() - answer a bla request by sending own claims
  577  * @bat_priv: the bat priv with all the soft interface information
  578  * @primary_if: interface where the request came on
  579  * @vid: the vid where the request came on
  580  *
  581  * Repeat all of our own claims, and finally send an ANNOUNCE frame
  582  * to allow the requester another check if the CRC is correct now.
  583  */
 584 static void batadv_bla_answer_request(struct batadv_priv *bat_priv,
 585                                       struct batadv_hard_iface *primary_if,
 586                                       unsigned short vid)
 587 {
 588         struct hlist_head *head;
 589         struct batadv_hashtable *hash;
 590         struct batadv_bla_claim *claim;
 591         struct batadv_bla_backbone_gw *backbone_gw;
 592         int i;
 593 
 594         batadv_dbg(BATADV_DBG_BLA, bat_priv,
 595                    "%s(): received a claim request, send all of our own claims again\n",
 596                    __func__);
 597 
 598         backbone_gw = batadv_backbone_hash_find(bat_priv,
 599                                                 primary_if->net_dev->dev_addr,
 600                                                 vid);
 601         if (!backbone_gw)
 602                 return;
 603 
 604         hash = bat_priv->bla.claim_hash;
 605         for (i = 0; i < hash->size; i++) {
 606                 head = &hash->table[i];
 607 
 608                 rcu_read_lock();
 609                 hlist_for_each_entry_rcu(claim, head, hash_entry) {
 610                         
 611                         if (claim->backbone_gw != backbone_gw)
 612                                 continue;
 613 
 614                         batadv_bla_send_claim(bat_priv, claim->addr, claim->vid,
 615                                               BATADV_CLAIM_TYPE_CLAIM);
 616                 }
 617                 rcu_read_unlock();
 618         }
 619 
 620         
 621         batadv_bla_send_announce(bat_priv, backbone_gw);
 622         batadv_backbone_gw_put(backbone_gw);
 623 }
 624 
  625 /**
  626  * batadv_bla_send_request() - send a request to repeat claims
  627  * @backbone_gw: the backbone gateway from whom we are out of sync
  628  *
  629  * When the crc is wrong, ask the backbone gateway for a full table update.
  630  * After the request, it will repeat all of its own claims and finally
  631  * send an announcement claim with which we can check again.
  632  */
 633 static void batadv_bla_send_request(struct batadv_bla_backbone_gw *backbone_gw)
 634 {
 635         
 636         batadv_bla_del_backbone_claims(backbone_gw);
 637 
 638         batadv_dbg(BATADV_DBG_BLA, backbone_gw->bat_priv,
 639                    "Sending REQUEST to %pM\n", backbone_gw->orig);
 640 
 641         
 642         batadv_bla_send_claim(backbone_gw->bat_priv, backbone_gw->orig,
 643                               backbone_gw->vid, BATADV_CLAIM_TYPE_REQUEST);
 644 
 645         
 646         if (!atomic_read(&backbone_gw->request_sent)) {
 647                 atomic_inc(&backbone_gw->bat_priv->bla.num_requests);
 648                 atomic_set(&backbone_gw->request_sent, 1);
 649         }
 650 }
 651 
  652 /**
  653  * batadv_bla_send_announce() - send an announcement frame
  654  * @bat_priv: the bat priv with all the soft interface information
  655  * @backbone_gw: our backbone gateway which should be announced
  656  */
 657 static void batadv_bla_send_announce(struct batadv_priv *bat_priv,
 658                                      struct batadv_bla_backbone_gw *backbone_gw)
 659 {
 660         u8 mac[ETH_ALEN];
 661         __be16 crc;
 662 
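              /* the announced source MAC is 43:05:43:05:XX:XX, where XX:XX is
               * the backbone gateway's current claim CRC in network byte order
               */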
 663         memcpy(mac, batadv_announce_mac, 4);
 664         spin_lock_bh(&backbone_gw->crc_lock);
 665         crc = htons(backbone_gw->crc);
 666         spin_unlock_bh(&backbone_gw->crc_lock);
 667         memcpy(&mac[4], &crc, 2);
 668 
 669         batadv_bla_send_claim(bat_priv, mac, backbone_gw->vid,
 670                               BATADV_CLAIM_TYPE_ANNOUNCE);
 671 }
 672 
  673 /**
  674  * batadv_bla_add_claim() - add (or update) a claim in the claim hash
  675  * @bat_priv: the bat priv with all the soft interface information
  676  * @mac: the mac address of the claimed client
  677  * @vid: the VLAN ID of the frame
  678  * @backbone_gw: the backbone gateway which claims it
  679  */
 680 static void batadv_bla_add_claim(struct batadv_priv *bat_priv,
 681                                  const u8 *mac, const unsigned short vid,
 682                                  struct batadv_bla_backbone_gw *backbone_gw)
 683 {
 684         struct batadv_bla_backbone_gw *old_backbone_gw;
 685         struct batadv_bla_claim *claim;
 686         struct batadv_bla_claim search_claim;
 687         bool remove_crc = false;
 688         int hash_added;
 689 
 690         ether_addr_copy(search_claim.addr, mac);
 691         search_claim.vid = vid;
 692         claim = batadv_claim_hash_find(bat_priv, &search_claim);
 693 
 694         
 695         if (!claim) {
 696                 claim = kzalloc(sizeof(*claim), GFP_ATOMIC);
 697                 if (!claim)
 698                         return;
 699 
 700                 ether_addr_copy(claim->addr, mac);
 701                 spin_lock_init(&claim->backbone_lock);
 702                 claim->vid = vid;
 703                 claim->lasttime = jiffies;
 704                 kref_get(&backbone_gw->refcount);
 705                 claim->backbone_gw = backbone_gw;
 706                 kref_init(&claim->refcount);
 707 
 708                 batadv_dbg(BATADV_DBG_BLA, bat_priv,
 709                            "%s(): adding new entry %pM, vid %d to hash ...\n",
 710                            __func__, mac, batadv_print_vid(vid));
 711 
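                      /* kref_init() above accounts for the local claim
                       * pointer; take one more reference here for the entry
                       * that the claim hash table is about to hold
                       */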
 712                 kref_get(&claim->refcount);
 713                 hash_added = batadv_hash_add(bat_priv->bla.claim_hash,
 714                                              batadv_compare_claim,
 715                                              batadv_choose_claim, claim,
 716                                              &claim->hash_entry);
 717 
 718                 if (unlikely(hash_added != 0)) {
 719                         
 720                         kfree(claim);
 721                         return;
 722                 }
 723         } else {
 724                 claim->lasttime = jiffies;
 725                 if (claim->backbone_gw == backbone_gw)
 726                         
 727                         goto claim_free_ref;
 728 
 729                 batadv_dbg(BATADV_DBG_BLA, bat_priv,
 730                            "%s(): changing ownership for %pM, vid %d to gw %pM\n",
 731                            __func__, mac, batadv_print_vid(vid),
 732                            backbone_gw->orig);
 733 
 734                 remove_crc = true;
 735         }
 736 
 737         
 738         spin_lock_bh(&claim->backbone_lock);
 739         old_backbone_gw = claim->backbone_gw;
 740         kref_get(&backbone_gw->refcount);
 741         claim->backbone_gw = backbone_gw;
 742         spin_unlock_bh(&claim->backbone_lock);
 743 
 744         if (remove_crc) {
 745                 
 746                 spin_lock_bh(&old_backbone_gw->crc_lock);
 747                 old_backbone_gw->crc ^= crc16(0, claim->addr, ETH_ALEN);
 748                 spin_unlock_bh(&old_backbone_gw->crc_lock);
 749         }
 750 
 751         batadv_backbone_gw_put(old_backbone_gw);
 752 
 753         
 754         spin_lock_bh(&backbone_gw->crc_lock);
 755         backbone_gw->crc ^= crc16(0, claim->addr, ETH_ALEN);
 756         spin_unlock_bh(&backbone_gw->crc_lock);
 757         backbone_gw->lasttime = jiffies;
 758 
 759 claim_free_ref:
 760         batadv_claim_put(claim);
 761 }
 762 
  763 /**
  764  * batadv_bla_claim_get_backbone_gw() - get a valid reference for the
  765  *  backbone gateway of a claim
  766  * @claim: claim whose backbone_gw should be returned
  767  *
  768  * Return: valid reference to claim::backbone_gw
  769  */
 770 static struct batadv_bla_backbone_gw *
 771 batadv_bla_claim_get_backbone_gw(struct batadv_bla_claim *claim)
 772 {
 773         struct batadv_bla_backbone_gw *backbone_gw;
 774 
 775         spin_lock_bh(&claim->backbone_lock);
 776         backbone_gw = claim->backbone_gw;
 777         kref_get(&backbone_gw->refcount);
 778         spin_unlock_bh(&claim->backbone_lock);
 779 
 780         return backbone_gw;
 781 }
 782 
  783 /**
  784  * batadv_bla_del_claim() - delete a claim from the claim hash
  785  * @bat_priv: the bat priv with all the soft interface information
  786  * @mac: mac address of the claim to be removed
  787  * @vid: VLAN id for the claim to be removed
  788  */
 789 static void batadv_bla_del_claim(struct batadv_priv *bat_priv,
 790                                  const u8 *mac, const unsigned short vid)
 791 {
 792         struct batadv_bla_claim search_claim, *claim;
 793         struct batadv_bla_claim *claim_removed_entry;
 794         struct hlist_node *claim_removed_node;
 795 
 796         ether_addr_copy(search_claim.addr, mac);
 797         search_claim.vid = vid;
 798         claim = batadv_claim_hash_find(bat_priv, &search_claim);
 799         if (!claim)
 800                 return;
 801 
 802         batadv_dbg(BATADV_DBG_BLA, bat_priv, "%s(): %pM, vid %d\n", __func__,
 803                    mac, batadv_print_vid(vid));
 804 
 805         claim_removed_node = batadv_hash_remove(bat_priv->bla.claim_hash,
 806                                                 batadv_compare_claim,
 807                                                 batadv_choose_claim, claim);
 808         if (!claim_removed_node)
 809                 goto free_claim;
 810 
 811         
 812         claim_removed_entry = hlist_entry(claim_removed_node,
 813                                           struct batadv_bla_claim, hash_entry);
 814         batadv_claim_put(claim_removed_entry);
 815 
 816 free_claim:
 817         
 818         batadv_claim_put(claim);
 819 }
 820 
  821 /**
  822  * batadv_handle_announce() - check for ANNOUNCE frame
  823  * @bat_priv: the bat priv with all the soft interface information
  824  * @an_addr: announcement mac address (ARP sender HW address)
  825  * @backbone_addr: originator address of the sender (Ethernet source MAC)
  826  * @vid: the VLAN ID of the frame
  827  *
  828  * Return: true if handled
  829  */
 830 static bool batadv_handle_announce(struct batadv_priv *bat_priv, u8 *an_addr,
 831                                    u8 *backbone_addr, unsigned short vid)
 832 {
 833         struct batadv_bla_backbone_gw *backbone_gw;
 834         u16 backbone_crc, crc;
 835 
 836         if (memcmp(an_addr, batadv_announce_mac, 4) != 0)
 837                 return false;
 838 
 839         backbone_gw = batadv_bla_get_backbone_gw(bat_priv, backbone_addr, vid,
 840                                                  false);
 841 
 842         if (unlikely(!backbone_gw))
 843                 return true;
 844 
 845         
 846         backbone_gw->lasttime = jiffies;
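              /* the CRC announced by the backbone is carried in the last two
               * bytes of the special announcement MAC, in network byte order
               * (see batadv_bla_send_announce())
               */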
 847         crc = ntohs(*((__be16 *)(&an_addr[4])));
 848 
 849         batadv_dbg(BATADV_DBG_BLA, bat_priv,
 850                    "%s(): ANNOUNCE vid %d (sent by %pM)... CRC = %#.4x\n",
 851                    __func__, batadv_print_vid(vid), backbone_gw->orig, crc);
 852 
 853         spin_lock_bh(&backbone_gw->crc_lock);
 854         backbone_crc = backbone_gw->crc;
 855         spin_unlock_bh(&backbone_gw->crc_lock);
 856 
 857         if (backbone_crc != crc) {
 858                 batadv_dbg(BATADV_DBG_BLA, backbone_gw->bat_priv,
 859                            "%s(): CRC FAILED for %pM/%d (my = %#.4x, sent = %#.4x)\n",
 860                            __func__, backbone_gw->orig,
 861                            batadv_print_vid(backbone_gw->vid),
 862                            backbone_crc, crc);
 863 
 864                 batadv_bla_send_request(backbone_gw);
 865         } else {
  866                 /* if we have sent a request and the CRC was OK,
  867                  * we can allow traffic again
  868                  */
 869                 if (atomic_read(&backbone_gw->request_sent)) {
 870                         atomic_dec(&backbone_gw->bat_priv->bla.num_requests);
 871                         atomic_set(&backbone_gw->request_sent, 0);
 872                 }
 873         }
 874 
 875         batadv_backbone_gw_put(backbone_gw);
 876         return true;
 877 }
 878 
  879 /**
  880  * batadv_handle_request() - check for REQUEST frame
  881  * @bat_priv: the bat priv with all the soft interface information
  882  * @primary_if: the primary hard interface of this batman soft interface
  883  * @backbone_addr: backbone address to be requested (ARP sender HW MAC)
  884  * @ethhdr: ethernet header of a packet
  885  * @vid: the VLAN ID of the frame
  886  *
  887  * Return: true if handled
  888  */
 889 static bool batadv_handle_request(struct batadv_priv *bat_priv,
 890                                   struct batadv_hard_iface *primary_if,
 891                                   u8 *backbone_addr, struct ethhdr *ethhdr,
 892                                   unsigned short vid)
 893 {
  894         /* the frame destination must match the requested backbone address */
 895         if (!batadv_compare_eth(backbone_addr, ethhdr->h_dest))
 896                 return false;
 897 
  898         /* sanity check, this should not happen on a normal switch;
  899          * we ignore it in this case
  900          */
 901         if (!batadv_compare_eth(ethhdr->h_dest, primary_if->net_dev->dev_addr))
 902                 return true;
 903 
 904         batadv_dbg(BATADV_DBG_BLA, bat_priv,
 905                    "%s(): REQUEST vid %d (sent by %pM)...\n",
 906                    __func__, batadv_print_vid(vid), ethhdr->h_source);
 907 
 908         batadv_bla_answer_request(bat_priv, primary_if, vid);
 909         return true;
 910 }
 911 
  912 /**
  913  * batadv_handle_unclaim() - check for UNCLAIM frame
  914  * @bat_priv: the bat priv with all the soft interface information
  915  * @primary_if: the primary hard interface of this batman soft interface
  916  * @backbone_addr: originator address of the backbone (Ethernet source)
  917  * @claim_addr: client mac address to be unclaimed (ARP sender HW MAC)
  918  * @vid: the VLAN ID of the frame
  919  *
  920  * Return: true if handled
  921  */
 922 static bool batadv_handle_unclaim(struct batadv_priv *bat_priv,
 923                                   struct batadv_hard_iface *primary_if,
 924                                   u8 *backbone_addr, u8 *claim_addr,
 925                                   unsigned short vid)
 926 {
 927         struct batadv_bla_backbone_gw *backbone_gw;
 928 
  929         /* if it is for our own backbone, propagate the UNCLAIM to the mesh */
 930         if (primary_if && batadv_compare_eth(backbone_addr,
 931                                              primary_if->net_dev->dev_addr))
 932                 batadv_bla_send_claim(bat_priv, claim_addr, vid,
 933                                       BATADV_CLAIM_TYPE_UNCLAIM);
 934 
 935         backbone_gw = batadv_backbone_hash_find(bat_priv, backbone_addr, vid);
 936 
 937         if (!backbone_gw)
 938                 return true;
 939 
 940         
 941         batadv_dbg(BATADV_DBG_BLA, bat_priv,
 942                    "%s(): UNCLAIM %pM on vid %d (sent by %pM)...\n", __func__,
 943                    claim_addr, batadv_print_vid(vid), backbone_gw->orig);
 944 
 945         batadv_bla_del_claim(bat_priv, claim_addr, vid);
 946         batadv_backbone_gw_put(backbone_gw);
 947         return true;
 948 }
 949 
  950 /**
  951  * batadv_handle_claim() - check for CLAIM frame
  952  * @bat_priv: the bat priv with all the soft interface information
  953  * @primary_if: the primary hard interface of this batman soft interface
  954  * @backbone_addr: originator address of the backbone (ARP sender HW MAC)
  955  * @claim_addr: client mac address to be claimed (Ethernet source)
  956  * @vid: the VLAN ID of the frame
  957  *
  958  * Return: true if handled
  959  */
 960 static bool batadv_handle_claim(struct batadv_priv *bat_priv,
 961                                 struct batadv_hard_iface *primary_if,
 962                                 u8 *backbone_addr, u8 *claim_addr,
 963                                 unsigned short vid)
 964 {
 965         struct batadv_bla_backbone_gw *backbone_gw;
 966 
 967         
 968 
 969         backbone_gw = batadv_bla_get_backbone_gw(bat_priv, backbone_addr, vid,
 970                                                  false);
 971 
 972         if (unlikely(!backbone_gw))
 973                 return true;
 974 
 975         
 976         batadv_bla_add_claim(bat_priv, claim_addr, vid, backbone_gw);
 977         if (batadv_compare_eth(backbone_addr, primary_if->net_dev->dev_addr))
 978                 batadv_bla_send_claim(bat_priv, claim_addr, vid,
 979                                       BATADV_CLAIM_TYPE_CLAIM);
 980 
 981         
 982 
 983         batadv_backbone_gw_put(backbone_gw);
 984         return true;
 985 }
 986 
  987 /**
  988  * batadv_check_claim_group() - check for claim group membership
  989  * @bat_priv: the bat priv with all the soft interface information
  990  * @primary_if: the primary interface of this batman interface
  991  * @hw_src: the Hardware source in the ARP Header
  992  * @hw_dst: the Hardware destination in the ARP Header
  993  * @ethhdr: pointer to the Ethernet header of the claim frame
  994  *
  995  * Checks if it is a claim packet and if it is on the same group.
  996  * This function also applies the group ID of the sender
  997  * if it is in the same mesh.
  998  *
  999  * Return:
 1000  *  2 - if it is a claim packet and on the same group
 1001  *  1 - if it is a claim packet from another group
 1002  *  0 - if it is not a claim packet
 1003  */
1004 static int batadv_check_claim_group(struct batadv_priv *bat_priv,
1005                                     struct batadv_hard_iface *primary_if,
1006                                     u8 *hw_src, u8 *hw_dst,
1007                                     struct ethhdr *ethhdr)
1008 {
1009         u8 *backbone_addr;
1010         struct batadv_orig_node *orig_node;
1011         struct batadv_bla_claim_dst *bla_dst, *bla_dst_own;
1012 
1013         bla_dst = (struct batadv_bla_claim_dst *)hw_dst;
1014         bla_dst_own = &bat_priv->bla.claim_dest;
1015 
1016         
1017 
1018 
1019         switch (bla_dst->type) {
1020         case BATADV_CLAIM_TYPE_CLAIM:
1021                 backbone_addr = hw_src;
1022                 break;
1023         case BATADV_CLAIM_TYPE_REQUEST:
1024         case BATADV_CLAIM_TYPE_ANNOUNCE:
1025         case BATADV_CLAIM_TYPE_UNCLAIM:
1026                 backbone_addr = ethhdr->h_source;
1027                 break;
1028         default:
1029                 return 0;
1030         }
1031 
 1032         /* don't accept claim frames from ourselves */
1033         if (batadv_compare_eth(backbone_addr, primary_if->net_dev->dev_addr))
1034                 return 0;
1035 
1036         
1037         if (bla_dst->group == bla_dst_own->group)
1038                 return 2;
1039 
1040         
1041         orig_node = batadv_orig_hash_find(bat_priv, backbone_addr);
1042 
 1043         /* don't accept claims from gateways which are not in
 1044          * the same mesh or group
 1045          */
1046         if (!orig_node)
1047                 return 1;
1048 
1049         
1050         if (ntohs(bla_dst->group) > ntohs(bla_dst_own->group)) {
1051                 batadv_dbg(BATADV_DBG_BLA, bat_priv,
1052                            "taking other backbones claim group: %#.4x\n",
1053                            ntohs(bla_dst->group));
1054                 bla_dst_own->group = bla_dst->group;
1055         }
1056 
1057         batadv_orig_node_put(orig_node);
1058 
1059         return 2;
1060 }
1061 
 1062 /**
 1063  * batadv_bla_process_claim() - check if this is a claim frame, and process it
 1064  * @bat_priv: the bat priv with all the soft interface information
 1065  * @primary_if: the primary hard interface of this batman soft interface
 1066  * @skb: the frame to be checked
 1067  *
 1068  * Return: true if it was a claim frame, otherwise return false to
 1069  * tell the callee that it can use the frame on its own.
 1070  */
1071 static bool batadv_bla_process_claim(struct batadv_priv *bat_priv,
1072                                      struct batadv_hard_iface *primary_if,
1073                                      struct sk_buff *skb)
1074 {
1075         struct batadv_bla_claim_dst *bla_dst, *bla_dst_own;
1076         u8 *hw_src, *hw_dst;
1077         struct vlan_hdr *vhdr, vhdr_buf;
1078         struct ethhdr *ethhdr;
1079         struct arphdr *arphdr;
1080         unsigned short vid;
1081         int vlan_depth = 0;
1082         __be16 proto;
1083         int headlen;
1084         int ret;
1085 
1086         vid = batadv_get_vid(skb, 0);
1087         ethhdr = eth_hdr(skb);
1088 
1089         proto = ethhdr->h_proto;
1090         headlen = ETH_HLEN;
1091         if (vid & BATADV_VLAN_HAS_TAG) {
 1092                 /* Traverse the VLAN/Ethertypes.
 1093                  *
 1094                  * At this point it is known that the first protocol is a
 1095                  * VLAN header, so start checking at the encapsulated protocol.
 1096                  *
 1097                  * The depth of the VLAN headers is recorded to drop BLA claim
 1098                  * frames encapsulated into multiple VLAN headers (QinQ).
 1099                  */
1100                 do {
1101                         vhdr = skb_header_pointer(skb, headlen, VLAN_HLEN,
1102                                                   &vhdr_buf);
1103                         if (!vhdr)
1104                                 return false;
1105 
1106                         proto = vhdr->h_vlan_encapsulated_proto;
1107                         headlen += VLAN_HLEN;
1108                         vlan_depth++;
1109                 } while (proto == htons(ETH_P_8021Q));
1110         }
1111 
1112         if (proto != htons(ETH_P_ARP))
 1113                 return false; /* not a claim frame */
1114 
1115         
1116 
1117         if (unlikely(!pskb_may_pull(skb, headlen + arp_hdr_len(skb->dev))))
1118                 return false;
1119 
1120         
1121         ethhdr = eth_hdr(skb);
1122         arphdr = (struct arphdr *)((u8 *)ethhdr + headlen);
1123 
1124         
1125 
1126 
1127         if (arphdr->ar_hrd != htons(ARPHRD_ETHER))
1128                 return false;
1129         if (arphdr->ar_pro != htons(ETH_P_IP))
1130                 return false;
1131         if (arphdr->ar_hln != ETH_ALEN)
1132                 return false;
1133         if (arphdr->ar_pln != 4)
1134                 return false;
1135 
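              /* in an Ethernet/IPv4 ARP payload the sender hardware address
               * follows the ARP header, and the target hardware address
               * follows the 6 byte sender hardware and 4 byte sender IP
               * addresses; that target address holds the bla claim data
               */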
1136         hw_src = (u8 *)arphdr + sizeof(struct arphdr);
1137         hw_dst = hw_src + ETH_ALEN + 4;
1138         bla_dst = (struct batadv_bla_claim_dst *)hw_dst;
1139         bla_dst_own = &bat_priv->bla.claim_dest;
1140 
1141         
1142         if (memcmp(bla_dst->magic, bla_dst_own->magic,
1143                    sizeof(bla_dst->magic)) != 0)
1144                 return false;
1145 
 1146         /* check if there is a claim frame encapsulated deeper in (QinQ)
 1147          * and drop that, as this is not supported by BLA but should also
 1148          * not be sent via the mesh
 1149          */
1150         if (vlan_depth > 1)
1151                 return true;
1152 
1153         
1154         if (bla_dst->type == BATADV_CLAIM_TYPE_LOOPDETECT)
1155                 return false;
1156 
1157         
1158         ret = batadv_check_claim_group(bat_priv, primary_if, hw_src, hw_dst,
1159                                        ethhdr);
1160         if (ret == 1)
1161                 batadv_dbg(BATADV_DBG_BLA, bat_priv,
1162                            "%s(): received a claim frame from another group. From: %pM on vid %d ...(hw_src %pM, hw_dst %pM)\n",
1163                            __func__, ethhdr->h_source, batadv_print_vid(vid),
1164                            hw_src, hw_dst);
1165 
1166         if (ret < 2)
1167                 return !!ret;
1168 
1169         
1170         batadv_bla_update_own_backbone_gw(bat_priv, primary_if, vid);
1171 
1172         
1173         switch (bla_dst->type) {
1174         case BATADV_CLAIM_TYPE_CLAIM:
1175                 if (batadv_handle_claim(bat_priv, primary_if, hw_src,
1176                                         ethhdr->h_source, vid))
1177                         return true;
1178                 break;
1179         case BATADV_CLAIM_TYPE_UNCLAIM:
1180                 if (batadv_handle_unclaim(bat_priv, primary_if,
1181                                           ethhdr->h_source, hw_src, vid))
1182                         return true;
1183                 break;
1184 
1185         case BATADV_CLAIM_TYPE_ANNOUNCE:
1186                 if (batadv_handle_announce(bat_priv, hw_src, ethhdr->h_source,
1187                                            vid))
1188                         return true;
1189                 break;
1190         case BATADV_CLAIM_TYPE_REQUEST:
1191                 if (batadv_handle_request(bat_priv, primary_if, hw_src, ethhdr,
1192                                           vid))
1193                         return true;
1194                 break;
1195         }
1196 
1197         batadv_dbg(BATADV_DBG_BLA, bat_priv,
1198                    "%s(): ERROR - this looks like a claim frame, but is useless. eth src %pM on vid %d ...(hw_src %pM, hw_dst %pM)\n",
1199                    __func__, ethhdr->h_source, batadv_print_vid(vid), hw_src,
1200                    hw_dst);
1201         return true;
1202 }
1203 
 1204 /**
 1205  * batadv_bla_purge_backbone_gw() - remove backbone gateways after a timeout
 1206  *  or immediately
 1207  * @bat_priv: the bat priv with all the soft interface information
 1208  * @now: whether the whole hash shall be wiped now
 1209  *
 1210  * Check when we last heard from other nodes, and remove them in case of
 1211  * a time out, or clean all backbone gateways if now is set.
 1212  */
1213 static void batadv_bla_purge_backbone_gw(struct batadv_priv *bat_priv, int now)
1214 {
1215         struct batadv_bla_backbone_gw *backbone_gw;
1216         struct hlist_node *node_tmp;
1217         struct hlist_head *head;
1218         struct batadv_hashtable *hash;
 1219         spinlock_t *list_lock; /* protects write access to the hash lists */
1220         int i;
1221 
1222         hash = bat_priv->bla.backbone_hash;
1223         if (!hash)
1224                 return;
1225 
1226         for (i = 0; i < hash->size; i++) {
1227                 head = &hash->table[i];
1228                 list_lock = &hash->list_locks[i];
1229 
1230                 spin_lock_bh(list_lock);
1231                 hlist_for_each_entry_safe(backbone_gw, node_tmp,
1232                                           head, hash_entry) {
1233                         if (now)
1234                                 goto purge_now;
1235                         if (!batadv_has_timed_out(backbone_gw->lasttime,
1236                                                   BATADV_BLA_BACKBONE_TIMEOUT))
1237                                 continue;
1238 
1239                         batadv_dbg(BATADV_DBG_BLA, backbone_gw->bat_priv,
1240                                    "%s(): backbone gw %pM timed out\n",
1241                                    __func__, backbone_gw->orig);
1242 
1243 purge_now:
1244                         
1245                         if (atomic_read(&backbone_gw->request_sent))
1246                                 atomic_dec(&bat_priv->bla.num_requests);
1247 
1248                         batadv_bla_del_backbone_claims(backbone_gw);
1249 
1250                         hlist_del_rcu(&backbone_gw->hash_entry);
1251                         batadv_backbone_gw_put(backbone_gw);
1252                 }
1253                 spin_unlock_bh(list_lock);
1254         }
1255 }
1256 
 1257 /**
 1258  * batadv_bla_purge_claims() - remove claims after a timeout or immediately
 1259  * @bat_priv: the bat priv with all the soft interface information
 1260  * @primary_if: the selected primary interface, may be NULL if now is set
 1261  * @now: whether the whole hash shall be wiped now
 1262  *
 1263  * Check when we heard last time from our own claims, and remove them in case
 1264  * of a time out, or clean all claims if now is set.
 1265  */
1266 static void batadv_bla_purge_claims(struct batadv_priv *bat_priv,
1267                                     struct batadv_hard_iface *primary_if,
1268                                     int now)
1269 {
1270         struct batadv_bla_backbone_gw *backbone_gw;
1271         struct batadv_bla_claim *claim;
1272         struct hlist_head *head;
1273         struct batadv_hashtable *hash;
1274         int i;
1275 
1276         hash = bat_priv->bla.claim_hash;
1277         if (!hash)
1278                 return;
1279 
1280         for (i = 0; i < hash->size; i++) {
1281                 head = &hash->table[i];
1282 
1283                 rcu_read_lock();
1284                 hlist_for_each_entry_rcu(claim, head, hash_entry) {
1285                         backbone_gw = batadv_bla_claim_get_backbone_gw(claim);
1286                         if (now)
1287                                 goto purge_now;
1288 
1289                         if (!batadv_compare_eth(backbone_gw->orig,
1290                                                 primary_if->net_dev->dev_addr))
1291                                 goto skip;
1292 
1293                         if (!batadv_has_timed_out(claim->lasttime,
1294                                                   BATADV_BLA_CLAIM_TIMEOUT))
1295                                 goto skip;
1296 
1297                         batadv_dbg(BATADV_DBG_BLA, bat_priv,
1298                                    "%s(): timed out.\n", __func__);
1299 
1300 purge_now:
1301                         batadv_dbg(BATADV_DBG_BLA, bat_priv,
1302                                    "%s(): %pM, vid %d\n", __func__,
1303                                    claim->addr, claim->vid);
1304 
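                              /* claims are purged by handling a faked UNCLAIM
                               * for them: batadv_handle_unclaim() deletes the
                               * claim and, if the backbone is our own, also
                               * propagates the UNCLAIM to the mesh
                               */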
1305                         batadv_handle_unclaim(bat_priv, primary_if,
1306                                               backbone_gw->orig,
1307                                               claim->addr, claim->vid);
1308 skip:
1309                         batadv_backbone_gw_put(backbone_gw);
1310                 }
1311                 rcu_read_unlock();
1312         }
1313 }
1314 
 1315 /**
 1316  * batadv_bla_update_orig_address() - renew bla claims and backbone gateways
 1317  *  when the own originator address changes
 1318  * @bat_priv: the bat priv with all the soft interface information
 1319  * @primary_if: the new selected primary_if
 1320  * @oldif: the old primary interface, may be NULL
 1321  */
1322 void batadv_bla_update_orig_address(struct batadv_priv *bat_priv,
1323                                     struct batadv_hard_iface *primary_if,
1324                                     struct batadv_hard_iface *oldif)
1325 {
1326         struct batadv_bla_backbone_gw *backbone_gw;
1327         struct hlist_head *head;
1328         struct batadv_hashtable *hash;
1329         __be16 group;
1330         int i;
1331 
1332         
1333         group = htons(crc16(0, primary_if->net_dev->dev_addr, ETH_ALEN));
1334         bat_priv->bla.claim_dest.group = group;
1335 
1336         
1337         if (!atomic_read(&bat_priv->bridge_loop_avoidance))
1338                 oldif = NULL;
1339 
1340         if (!oldif) {
1341                 batadv_bla_purge_claims(bat_priv, NULL, 1);
1342                 batadv_bla_purge_backbone_gw(bat_priv, 1);
1343                 return;
1344         }
1345 
1346         hash = bat_priv->bla.backbone_hash;
1347         if (!hash)
1348                 return;
1349 
1350         for (i = 0; i < hash->size; i++) {
1351                 head = &hash->table[i];
1352 
1353                 rcu_read_lock();
1354                 hlist_for_each_entry_rcu(backbone_gw, head, hash_entry) {
1355                         
1356                         if (!batadv_compare_eth(backbone_gw->orig,
1357                                                 oldif->net_dev->dev_addr))
1358                                 continue;
1359 
1360                         ether_addr_copy(backbone_gw->orig,
1361                                         primary_if->net_dev->dev_addr);
1362                         
1363 
1364 
1365                         batadv_bla_send_announce(bat_priv, backbone_gw);
1366                 }
1367                 rcu_read_unlock();
1368         }
1369 }
1370 
 1371 /**
 1372  * batadv_bla_send_loopdetect() - send a loop detection claim
 1373  * @bat_priv: the bat priv with all the soft interface information
 1374  * @backbone_gw: the backbone gateway for which a loop should be detected
 1375  *
 1376  * To detect loops that bridge loop avoidance can't handle, send a loop
 1377  * detection frame on the backbone. Unlike other claim frames, loop detection
 1378  * frames are let through by batadv_bla_process_claim(); if such a frame
 1379  * comes back to us, there is a loop which bla can't resolve, so report it.
 1380  */
1381 static void
1382 batadv_bla_send_loopdetect(struct batadv_priv *bat_priv,
1383                            struct batadv_bla_backbone_gw *backbone_gw)
1384 {
1385         batadv_dbg(BATADV_DBG_BLA, bat_priv, "Send loopdetect frame for vid %d\n",
1386                    backbone_gw->vid);
1387         batadv_bla_send_claim(bat_priv, bat_priv->bla.loopdetect_addr,
1388                               backbone_gw->vid, BATADV_CLAIM_TYPE_LOOPDETECT);
1389 }
1390 
 1391 /**
 1392  * batadv_bla_status_update() - purge bla interfaces if necessary
 1393  * @net_dev: the soft interface net device
 1394  */
1395 void batadv_bla_status_update(struct net_device *net_dev)
1396 {
1397         struct batadv_priv *bat_priv = netdev_priv(net_dev);
1398         struct batadv_hard_iface *primary_if;
1399 
1400         primary_if = batadv_primary_if_get_selected(bat_priv);
1401         if (!primary_if)
1402                 return;
1403 
 1404         /* batadv_bla_update_orig_address() already purges everything
 1405          * when bla is disabled, so simply reuse it here
 1406          */
1407         batadv_bla_update_orig_address(bat_priv, primary_if, primary_if);
1408         batadv_hardif_put(primary_if);
1409 }
1410 
 1411 /**
 1412  * batadv_bla_periodic_work() - performs periodic bla work
 1413  * @work: kernel work struct
 1414  *
 1415  * periodic work to do:
 1416  *  * purge structures when they are too old
 1417  *  * send announcements
 1418  */
1419 static void batadv_bla_periodic_work(struct work_struct *work)
1420 {
1421         struct delayed_work *delayed_work;
1422         struct batadv_priv *bat_priv;
1423         struct batadv_priv_bla *priv_bla;
1424         struct hlist_head *head;
1425         struct batadv_bla_backbone_gw *backbone_gw;
1426         struct batadv_hashtable *hash;
1427         struct batadv_hard_iface *primary_if;
1428         bool send_loopdetect = false;
1429         int i;
1430 
1431         delayed_work = to_delayed_work(work);
1432         priv_bla = container_of(delayed_work, struct batadv_priv_bla, work);
1433         bat_priv = container_of(priv_bla, struct batadv_priv, bla);
1434         primary_if = batadv_primary_if_get_selected(bat_priv);
1435         if (!primary_if)
1436                 goto out;
1437 
1438         batadv_bla_purge_claims(bat_priv, primary_if, 0);
1439         batadv_bla_purge_backbone_gw(bat_priv, 0);
1440 
1441         if (!atomic_read(&bat_priv->bridge_loop_avoidance))
1442                 goto out;
1443 
1444         if (atomic_dec_and_test(&bat_priv->bla.loopdetect_next)) {
 1445                 /* generate a new random mac address for the next loop
 1446                  * detection frames and prefix it with ba:be so the
 1447                  * probes are recognizable as such
 1448                  */
1449                 eth_random_addr(bat_priv->bla.loopdetect_addr);
1450                 bat_priv->bla.loopdetect_addr[0] = 0xba;
1451                 bat_priv->bla.loopdetect_addr[1] = 0xbe;
1452                 bat_priv->bla.loopdetect_lasttime = jiffies;
1453                 atomic_set(&bat_priv->bla.loopdetect_next,
1454                            BATADV_BLA_LOOPDETECT_PERIODS);
1455 
1456                 
1457                 send_loopdetect = true;
1458         }
1459 
1460         hash = bat_priv->bla.backbone_hash;
1461         if (!hash)
1462                 goto out;
1463 
1464         for (i = 0; i < hash->size; i++) {
1465                 head = &hash->table[i];
1466 
1467                 rcu_read_lock();
1468                 hlist_for_each_entry_rcu(backbone_gw, head, hash_entry) {
1469                         if (!batadv_compare_eth(backbone_gw->orig,
1470                                                 primary_if->net_dev->dev_addr))
1471                                 continue;
1472 
1473                         backbone_gw->lasttime = jiffies;
1474 
1475                         batadv_bla_send_announce(bat_priv, backbone_gw);
1476                         if (send_loopdetect)
1477                                 batadv_bla_send_loopdetect(bat_priv,
1478                                                            backbone_gw);
1479 
 1480                         /* request_sent is set while we wait for an
 1481                          * answer to a claim request (or right after
 1482                          * creating our own backbone entry); count down
 1483                          * the wait periods here and re-allow broadcast
 1484                          * traffic once enough periods have passed
 1485                          * without an answer
 1486                          */
 1487 
 1488 
1489                         if (atomic_read(&backbone_gw->request_sent) == 0)
1490                                 continue;
1491 
1492                         if (!atomic_dec_and_test(&backbone_gw->wait_periods))
1493                                 continue;
1494 
1495                         atomic_dec(&backbone_gw->bat_priv->bla.num_requests);
1496                         atomic_set(&backbone_gw->request_sent, 0);
1497                 }
1498                 rcu_read_unlock();
1499         }
1500 out:
1501         if (primary_if)
1502                 batadv_hardif_put(primary_if);
1503 
1504         queue_delayed_work(batadv_event_workqueue, &bat_priv->bla.work,
1505                            msecs_to_jiffies(BATADV_BLA_PERIOD_LENGTH));
1506 }
1507 
 1508 /* The claim hash and the backbone hash would receive the same lock key
 1509  * because they are initialized with the same hash_new() helper; assign
 1510  * them distinct lockdep classes so that nested locking does not trigger
 1511  * false lockdep warnings.
 1512  */
1513 static struct lock_class_key batadv_claim_hash_lock_class_key;
1514 static struct lock_class_key batadv_backbone_hash_lock_class_key;
1515 
 1516 /**
 1517  * batadv_bla_init() - initialize all bla structures
 1518  * @bat_priv: the bat priv with all the soft interface information
 1519  *
 1520  * Return: 0 on success, < 0 on error.
 1521  */
1522 int batadv_bla_init(struct batadv_priv *bat_priv)
1523 {
1524         int i;
1525         u8 claim_dest[ETH_ALEN] = {0xff, 0x43, 0x05, 0x00, 0x00, 0x00};
1526         struct batadv_hard_iface *primary_if;
1527         u16 crc;
1528         unsigned long entrytime;
1529 
1530         spin_lock_init(&bat_priv->bla.bcast_duplist_lock);
1531 
1532         batadv_dbg(BATADV_DBG_BLA, bat_priv, "bla hash registering\n");
1533 
1534         
1535         memcpy(&bat_priv->bla.claim_dest.magic, claim_dest, 3);
1536         bat_priv->bla.claim_dest.type = 0;
1537         primary_if = batadv_primary_if_get_selected(bat_priv);
1538         if (primary_if) {
1539                 crc = crc16(0, primary_if->net_dev->dev_addr, ETH_ALEN);
1540                 bat_priv->bla.claim_dest.group = htons(crc);
1541                 batadv_hardif_put(primary_if);
1542         } else {
 1543                 bat_priv->bla.claim_dest.group = 0; /* will be set later */
1544         }
1545 
1546         
1547         entrytime = jiffies - msecs_to_jiffies(BATADV_DUPLIST_TIMEOUT);
1548         for (i = 0; i < BATADV_DUPLIST_SIZE; i++)
1549                 bat_priv->bla.bcast_duplist[i].entrytime = entrytime;
1550         bat_priv->bla.bcast_duplist_curr = 0;
1551 
1552         atomic_set(&bat_priv->bla.loopdetect_next,
1553                    BATADV_BLA_LOOPDETECT_PERIODS);
1554 
1555         if (bat_priv->bla.claim_hash)
1556                 return 0;
1557 
1558         bat_priv->bla.claim_hash = batadv_hash_new(128);
1559         bat_priv->bla.backbone_hash = batadv_hash_new(32);
1560 
1561         if (!bat_priv->bla.claim_hash || !bat_priv->bla.backbone_hash)
1562                 return -ENOMEM;
1563 
1564         batadv_hash_set_lock_class(bat_priv->bla.claim_hash,
1565                                    &batadv_claim_hash_lock_class_key);
1566         batadv_hash_set_lock_class(bat_priv->bla.backbone_hash,
1567                                    &batadv_backbone_hash_lock_class_key);
1568 
1569         batadv_dbg(BATADV_DBG_BLA, bat_priv, "bla hashes initialized\n");
1570 
1571         INIT_DELAYED_WORK(&bat_priv->bla.work, batadv_bla_periodic_work);
1572 
1573         queue_delayed_work(batadv_event_workqueue, &bat_priv->bla.work,
1574                            msecs_to_jiffies(BATADV_BLA_PERIOD_LENGTH));
1575         return 0;
1576 }
1577 
 1578 /**
 1579  * batadv_bla_check_bcast_duplist() - check if a broadcast is a known duplicate
 1580  * @bat_priv: the bat priv with all the soft interface information
 1581  * @skb: contains the bcast_packet to be checked
 1582  *
 1583  * Check if it is on our broadcast list. Another gateway might have sent the
 1584  * same packet because it is connected to the same backbone, so we have to
 1585  * remove this duplicate.
 1586  *
 1587  * This is performed by checking the CRC, which will tell us with a good
 1588  * chance that it is the same packet. If it is furthermore sent by another
 1589  * host, drop it. We allow equal packets from the same host, however, as
 1590  * this might be intended.
 1591  *
 1592  * Return: true if a packet is in the duplicate list, false otherwise.
 1593  */
1594 bool batadv_bla_check_bcast_duplist(struct batadv_priv *bat_priv,
1595                                     struct sk_buff *skb)
1596 {
1597         int i, curr;
1598         __be32 crc;
1599         struct batadv_bcast_packet *bcast_packet;
1600         struct batadv_bcast_duplist_entry *entry;
1601         bool ret = false;
1602 
1603         bcast_packet = (struct batadv_bcast_packet *)skb->data;
1604 
1605         
1606         crc = batadv_skb_crc32(skb, (u8 *)(bcast_packet + 1));
1607 
1608         spin_lock_bh(&bat_priv->bla.bcast_duplist_lock);
1609 
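              /* walk the ring buffer of recently seen broadcasts, newest
               * first; entries further in are older, so the search can stop
               * at the first entry that exceeded the duplicate timeout
               */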
1610         for (i = 0; i < BATADV_DUPLIST_SIZE; i++) {
1611                 curr = (bat_priv->bla.bcast_duplist_curr + i);
1612                 curr %= BATADV_DUPLIST_SIZE;
1613                 entry = &bat_priv->bla.bcast_duplist[curr];
1614 
 1615                 /* we can stop searching if the entry is too old;
 1616                  * later entries in the ring buffer are even older
 1617                  */
1618                 if (batadv_has_timed_out(entry->entrytime,
1619                                          BATADV_DUPLIST_TIMEOUT))
1620                         break;
1621 
1622                 if (entry->crc != crc)
1623                         continue;
1624 
1625                 if (batadv_compare_eth(entry->orig, bcast_packet->orig))
1626                         continue;
1627 
 1628                 /* same crc, not too old and sent by another gateway:
 1629                  * this is a duplicate, return true to forbid it
 1630                  */
1631                 ret = true;
1632                 goto out;
1633         }
 1634         /* not found, add a new entry (overwriting the oldest entry)
 1635          * and allow it; it is the first occurrence of this packet
 1636          */
1637         curr = (bat_priv->bla.bcast_duplist_curr + BATADV_DUPLIST_SIZE - 1);
1638         curr %= BATADV_DUPLIST_SIZE;
1639         entry = &bat_priv->bla.bcast_duplist[curr];
1640         entry->crc = crc;
1641         entry->entrytime = jiffies;
1642         ether_addr_copy(entry->orig, bcast_packet->orig);
1643         bat_priv->bla.bcast_duplist_curr = curr;
1644 
1645 out:
1646         spin_unlock_bh(&bat_priv->bla.bcast_duplist_lock);
1647 
1648         return ret;
1649 }
1650 
 1651 /**
 1652  * batadv_bla_is_backbone_gw_orig() - check if the originator is a backbone
 1653  *  gateway for the VLAN identified by vid
 1654  * @bat_priv: the bat priv with all the soft interface information
 1655  * @orig: originator mac address
 1656  * @vid: VLAN identifier
 1657  *
 1658  * Return: true if orig is a backbone for this vid, false otherwise.
 1659  */
1660 bool batadv_bla_is_backbone_gw_orig(struct batadv_priv *bat_priv, u8 *orig,
1661                                     unsigned short vid)
1662 {
1663         struct batadv_hashtable *hash = bat_priv->bla.backbone_hash;
1664         struct hlist_head *head;
1665         struct batadv_bla_backbone_gw *backbone_gw;
1666         int i;
1667 
1668         if (!atomic_read(&bat_priv->bridge_loop_avoidance))
1669                 return false;
1670 
1671         if (!hash)
1672                 return false;
1673 
1674         for (i = 0; i < hash->size; i++) {
1675                 head = &hash->table[i];
1676 
1677                 rcu_read_lock();
1678                 hlist_for_each_entry_rcu(backbone_gw, head, hash_entry) {
1679                         if (batadv_compare_eth(backbone_gw->orig, orig) &&
1680                             backbone_gw->vid == vid) {
1681                                 rcu_read_unlock();
1682                                 return true;
1683                         }
1684                 }
1685                 rcu_read_unlock();
1686         }
1687 
1688         return false;
1689 }
1690 
 1691 /**
 1692  * batadv_bla_is_backbone_gw() - check if originator is a backbone gateway
 1693  *  for the VLAN of the given frame
 1694  * @skb: the frame to be checked
 1695  * @orig_node: the orig_node of the frame
 1696  * @hdr_size: maximum length of the frame
 1697  *
 1698  * Return: true if the orig_node is also a backbone gateway, false otherwise.
 1699  */
1700 bool batadv_bla_is_backbone_gw(struct sk_buff *skb,
1701                                struct batadv_orig_node *orig_node, int hdr_size)
1702 {
1703         struct batadv_bla_backbone_gw *backbone_gw;
1704         unsigned short vid;
1705 
1706         if (!atomic_read(&orig_node->bat_priv->bridge_loop_avoidance))
1707                 return false;
1708 
1709         
1710         if (!pskb_may_pull(skb, hdr_size + ETH_HLEN))
1711                 return false;
1712 
1713         vid = batadv_get_vid(skb, hdr_size);
1714 
1715         
1716         backbone_gw = batadv_backbone_hash_find(orig_node->bat_priv,
1717                                                 orig_node->orig, vid);
1718         if (!backbone_gw)
1719                 return false;
1720 
1721         batadv_backbone_gw_put(backbone_gw);
1722         return true;
1723 }
1724 
 1725 /**
 1726  * batadv_bla_free() - free all bla structures
 1727  * @bat_priv: the bat priv with all the soft interface information
 1728  *
 1729  * for soft interface free or module unload
 1730  */
1731 void batadv_bla_free(struct batadv_priv *bat_priv)
1732 {
1733         struct batadv_hard_iface *primary_if;
1734 
1735         cancel_delayed_work_sync(&bat_priv->bla.work);
1736         primary_if = batadv_primary_if_get_selected(bat_priv);
1737 
1738         if (bat_priv->bla.claim_hash) {
1739                 batadv_bla_purge_claims(bat_priv, primary_if, 1);
1740                 batadv_hash_destroy(bat_priv->bla.claim_hash);
1741                 bat_priv->bla.claim_hash = NULL;
1742         }
1743         if (bat_priv->bla.backbone_hash) {
1744                 batadv_bla_purge_backbone_gw(bat_priv, 1);
1745                 batadv_hash_destroy(bat_priv->bla.backbone_hash);
1746                 bat_priv->bla.backbone_hash = NULL;
1747         }
1748         if (primary_if)
1749                 batadv_hardif_put(primary_if);
1750 }
1751 
1752 /**
1753  * batadv_bla_loopdetect_check() - check and handle a detected loop
1754  * @bat_priv: the bat priv with all the soft interface information
1755  * @skb: the packet to check
1756  * @primary_if: interface where the request came on
1757  * @vid: the VLAN ID of the frame
1758  *
1759  * Checks if this packet is a loop detect frame which has been sent by us,
1760  * throws an uevent and logs the event if that is the case.
1761  *
1762  * Return: true if it is a loop detect frame which is to be dropped, false
1763  * otherwise.
1764  */
1765 static bool
1766 batadv_bla_loopdetect_check(struct batadv_priv *bat_priv, struct sk_buff *skb,
1767                             struct batadv_hard_iface *primary_if,
1768                             unsigned short vid)
1769 {
1770         struct batadv_bla_backbone_gw *backbone_gw;
1771         struct ethhdr *ethhdr;
1772         bool ret;
1773 
1774         ethhdr = eth_hdr(skb);
1775 
1776         /* only frames carrying our own loopdetect address as source are of
1777          * interest here: they indicate that a frame we sent came back to us.
1778          */
1779         if (!batadv_compare_eth(ethhdr->h_source,
1780                                 bat_priv->bla.loopdetect_addr))
1781                 return false;
1782 
1783         /* the loopdetect frame was sent too long ago: drop it, but do not
1784          * report a loop for such a stale copy.
1785          */
1786         if (batadv_has_timed_out(bat_priv->bla.loopdetect_lasttime,
1787                                  BATADV_BLA_LOOPDETECT_TIMEOUT))
1788                 return true;
1789 
1790         backbone_gw = batadv_bla_get_backbone_gw(bat_priv,
1791                                                  primary_if->net_dev->dev_addr,
1792                                                  vid, true);
1793         if (unlikely(!backbone_gw))
1794                 return true;
1795 
1796         ret = queue_work(batadv_event_workqueue, &backbone_gw->report_work);
1797 
1798         /* backbone_gw is unreferenced in the report work function
1799          * if the queue_work() call was successful
1800          */
1801         if (!ret)
1802                 batadv_backbone_gw_put(backbone_gw);
1803 
1804         return true;
1805 }
1806 
1807 /**
1808  * batadv_bla_rx() - check packets coming from the mesh.
1809  * @bat_priv: the bat priv with all the soft interface information
1810  * @skb: the frame to be checked
1811  * @vid: the VLAN ID of the frame
1812  * @is_bcast: the packet came in a broadcast packet type.
1813  *
1814  * batadv_bla_rx avoidance checks if:
1815  *  * we have to race for a claim
1816  *  * if the frame is allowed on the LAN
1817  *
1818  * In these cases, the skb is further handled by this function
1819  *
1820  * Return: true if handled, otherwise it returns false and the caller shall
1821  * further process the skb.
1822  */
1823 bool batadv_bla_rx(struct batadv_priv *bat_priv, struct sk_buff *skb,
1824                    unsigned short vid, bool is_bcast)
1825 {
1826         struct batadv_bla_backbone_gw *backbone_gw;
1827         struct ethhdr *ethhdr;
1828         struct batadv_bla_claim search_claim, *claim = NULL;
1829         struct batadv_hard_iface *primary_if;
1830         bool own_claim;
1831         bool ret;
1832 
1833         ethhdr = eth_hdr(skb);
1834 
1835         primary_if = batadv_primary_if_get_selected(bat_priv);
1836         if (!primary_if)
1837                 goto handled;
1838 
1839         if (!atomic_read(&bat_priv->bridge_loop_avoidance))
1840                 goto allow;
1841 
1842         if (batadv_bla_loopdetect_check(bat_priv, skb, primary_if, vid))
1843                 goto handled;
1844 
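             /* descriptive note: while claim requests are pending the claim
              * table is still being rebuilt, so broadcast traffic cannot be
              * checked against it reliably; dropping it errs on the side of
              * loop avoidance.
              */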
1845         if (unlikely(atomic_read(&bat_priv->bla.num_requests)))
1846                 /* don't allow broadcasts while requests are in flight */
1847                 if (is_multicast_ether_addr(ethhdr->h_dest) && is_bcast)
1848                         goto handled;
1849 
1850         ether_addr_copy(search_claim.addr, ethhdr->h_source);
1851         search_claim.vid = vid;
1852         claim = batadv_claim_hash_find(bat_priv, &search_claim);
1853 
1854         if (!claim) {
1855                 /* possible optimization: race for a claim */
1856                 /* No claim exists yet, claim it for us!
1857                  */
1858 
1859                 batadv_dbg(BATADV_DBG_BLA, bat_priv,
1860                            "%s(): Unclaimed MAC %pM found. Claim it. Local: %s\n",
1861                            __func__, ethhdr->h_source,
1862                            batadv_is_my_client(bat_priv,
1863                                                ethhdr->h_source, vid) ?
1864                            "yes" : "no");
1865                 batadv_handle_claim(bat_priv, primary_if,
1866                                     primary_if->net_dev->dev_addr,
1867                                     ethhdr->h_source, vid);
1868                 goto allow;
1869         }
1870 
1871         /* if it is our own claim ... */
1872         backbone_gw = batadv_bla_claim_get_backbone_gw(claim);
1873         own_claim = batadv_compare_eth(backbone_gw->orig,
1874                                        primary_if->net_dev->dev_addr);
1875         batadv_backbone_gw_put(backbone_gw);
1876 
1877         if (own_claim) {
1878                 /* ... allow it in any case */
1879                 claim->lasttime = jiffies;
1880                 goto allow;
1881         }
1882 
1883         /* if it is a broadcast ... */
1884         if (is_multicast_ether_addr(ethhdr->h_dest) && is_bcast) {
1885                 /* drop it. the responsible gateway is in charge.
1886                  *
1887                  * We need to check is_bcast because with the gateway
1888                  * feature, broadcasts (like DHCP requests) may be sent
1889                  * using a unicast packet type.
1890                  */
1891                 goto handled;
1892         } else {
1893                 /* seems the client considers us as its best gateway.
1894                  * send a claim and update the claim table
1895                  * immediately.
1896                  */
1897                 batadv_handle_claim(bat_priv, primary_if,
1898                                     primary_if->net_dev->dev_addr,
1899                                     ethhdr->h_source, vid);
1900                 goto allow;
1901         }
1902 allow:
1903         batadv_bla_update_own_backbone_gw(bat_priv, primary_if, vid);
1904         ret = false;
1905         goto out;
1906 
1907 handled:
1908         kfree_skb(skb);
1909         ret = true;
1910 
1911 out:
1912         if (primary_if)
1913                 batadv_hardif_put(primary_if);
1914         if (claim)
1915                 batadv_claim_put(claim);
1916         return ret;
1917 }
1918 
1919 /**
1920  * batadv_bla_tx() - check packets going into the mesh
1921  * @bat_priv: the bat priv with all the soft interface information
1922  * @skb: the frame to be checked
1923  * @vid: the VLAN ID of the frame
1924  *
1925  * batadv_bla_tx checks if:
1926  *  * a claim was received which has to be processed
1927  *  * the frame is allowed on the mesh
1928  *
1929  * in these cases, the skb is further handled by this function.
1930  *
1931  * This call might reallocate skb data.
1932  *
1933  * Return: true if handled, otherwise it returns false and the caller shall
1934  * further process the skb.
1935  */
1936 bool batadv_bla_tx(struct batadv_priv *bat_priv, struct sk_buff *skb,
1937                    unsigned short vid)
1938 {
1939         struct ethhdr *ethhdr;
1940         struct batadv_bla_claim search_claim, *claim = NULL;
1941         struct batadv_bla_backbone_gw *backbone_gw;
1942         struct batadv_hard_iface *primary_if;
1943         bool client_roamed;
1944         bool ret = false;
1945 
1946         primary_if = batadv_primary_if_get_selected(bat_priv);
1947         if (!primary_if)
1948                 goto out;
1949 
1950         if (!atomic_read(&bat_priv->bridge_loop_avoidance))
1951                 goto allow;
1952 
1953         if (batadv_bla_process_claim(bat_priv, primary_if, skb))
1954                 goto handled;
1955 
1956         ethhdr = eth_hdr(skb);
1957 
1958         if (unlikely(atomic_read(&bat_priv->bla.num_requests)))
1959                 /* don't allow broadcasts while requests are in flight */
1960                 if (is_multicast_ether_addr(ethhdr->h_dest))
1961                         goto handled;
1962 
1963         ether_addr_copy(search_claim.addr, ethhdr->h_source);
1964         search_claim.vid = vid;
1965 
1966         claim = batadv_claim_hash_find(bat_priv, &search_claim);
1967 
1968         /* if no claim exists, allow it. */
1969         if (!claim)
1970                 goto allow;
1971 
1972         /* check if we are responsible. */
1973         backbone_gw = batadv_bla_claim_get_backbone_gw(claim);
1974         client_roamed = batadv_compare_eth(backbone_gw->orig,
1975                                            primary_if->net_dev->dev_addr);
1976         batadv_backbone_gw_put(backbone_gw);
1977 
1978         if (client_roamed) {
1979                 /* if yes, the client has roamed and we have
1980                  * to unclaim it.
1981                  */
1982                 if (batadv_has_timed_out(claim->lasttime, 100)) {
1983                         /* only unclaim if the last claim entry is
1984                          * older than 100 ms to make sure we really
1985                          * have a roaming client here.
1986                          */
1987                         batadv_dbg(BATADV_DBG_BLA, bat_priv, "%s(): Roaming client %pM detected. Unclaim it.\n",
1988                                    __func__, ethhdr->h_source);
1989                         batadv_handle_unclaim(bat_priv, primary_if,
1990                                               primary_if->net_dev->dev_addr,
1991                                               ethhdr->h_source, vid);
1992                         goto allow;
1993                 } else {
1994                         batadv_dbg(BATADV_DBG_BLA, bat_priv, "%s(): Race for claim %pM detected. Drop packet.\n",
1995                                    __func__, ethhdr->h_source);
1996                         goto handled;
1997                 }
1998         }
1999 
2000         /* check if it is a multicast/broadcast frame */
2001         if (is_multicast_ether_addr(ethhdr->h_dest)) {
2002                 /* drop it. the responsible gateway has forwarded it into
2003                  * the backbone network.
2004                  */
2005                 goto handled;
2006         } else {
2007                 /* we must allow it. at least if we are
2008                  * responsible for the DESTINATION.
2009                  */
2010                 goto allow;
2011         }
2012 allow:
2013         batadv_bla_update_own_backbone_gw(bat_priv, primary_if, vid);
2014         ret = false;
2015         goto out;
2016 handled:
2017         ret = true;
2018 out:
2019         if (primary_if)
2020                 batadv_hardif_put(primary_if);
2021         if (claim)
2022                 batadv_claim_put(claim);
2023         return ret;
2024 }
2025 
2026 #ifdef CONFIG_BATMAN_ADV_DEBUGFS
2027 /**
2028  * batadv_bla_claim_table_seq_print_text() - print the claim table in a seq file
2029  * @seq: seq file to print on
2030  * @offset: not used
2031  *
2032  * Return: always 0
2033  */
2034 int batadv_bla_claim_table_seq_print_text(struct seq_file *seq, void *offset)
2035 {
2036         struct net_device *net_dev = (struct net_device *)seq->private;
2037         struct batadv_priv *bat_priv = netdev_priv(net_dev);
2038         struct batadv_hashtable *hash = bat_priv->bla.claim_hash;
2039         struct batadv_bla_backbone_gw *backbone_gw;
2040         struct batadv_bla_claim *claim;
2041         struct batadv_hard_iface *primary_if;
2042         struct hlist_head *head;
2043         u16 backbone_crc;
2044         u32 i;
2045         bool is_own;
2046         u8 *primary_addr;
2047 
2048         primary_if = batadv_seq_print_text_primary_if_get(seq);
2049         if (!primary_if)
2050                 goto out;
2051 
2052         primary_addr = primary_if->net_dev->dev_addr;
2053         seq_printf(seq,
2054                    "Claims announced for the mesh %s (orig %pM, group id %#.4x)\n",
2055                    net_dev->name, primary_addr,
2056                    ntohs(bat_priv->bla.claim_dest.group));
2057         seq_puts(seq,
2058                  "   Client               VID      Originator        [o] (CRC   )\n");
2059         for (i = 0; i < hash->size; i++) {
2060                 head = &hash->table[i];
2061 
2062                 rcu_read_lock();
2063                 hlist_for_each_entry_rcu(claim, head, hash_entry) {
2064                         backbone_gw = batadv_bla_claim_get_backbone_gw(claim);
2065 
2066                         is_own = batadv_compare_eth(backbone_gw->orig,
2067                                                     primary_addr);
2068 
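                             /* descriptive note: backbone_gw->crc is updated
                              * concurrently (under crc_lock) when claims are
                              * added or removed; take the lock to read a
                              * consistent snapshot.
                              */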
2069                         spin_lock_bh(&backbone_gw->crc_lock);
2070                         backbone_crc = backbone_gw->crc;
2071                         spin_unlock_bh(&backbone_gw->crc_lock);
2072                         seq_printf(seq, " * %pM on %5d by %pM [%c] (%#.4x)\n",
2073                                    claim->addr, batadv_print_vid(claim->vid),
2074                                    backbone_gw->orig,
2075                                    (is_own ? 'x' : ' '),
2076                                    backbone_crc);
2077 
2078                         batadv_backbone_gw_put(backbone_gw);
2079                 }
2080                 rcu_read_unlock();
2081         }
2082 out:
2083         if (primary_if)
2084                 batadv_hardif_put(primary_if);
2085         return 0;
2086 }
2087 #endif
2088 
2089 /**
2090  * batadv_bla_claim_dump_entry() - dump one entry of the claim table
2091  *  to a netlink socket
2092  * @msg: buffer for the message
2093  * @portid: netlink port
2094  * @cb: Control block containing additional options
2095  * @primary_if: primary interface
2096  * @claim: entry to dump
2097  *
2098  * Return: 0 or error code.
2099  */
2100 static int
2101 batadv_bla_claim_dump_entry(struct sk_buff *msg, u32 portid,
2102                             struct netlink_callback *cb,
2103                             struct batadv_hard_iface *primary_if,
2104                             struct batadv_bla_claim *claim)
2105 {
2106         u8 *primary_addr = primary_if->net_dev->dev_addr;
2107         u16 backbone_crc;
2108         bool is_own;
2109         void *hdr;
2110         int ret = -EINVAL;
2111 
2112         hdr = genlmsg_put(msg, portid, cb->nlh->nlmsg_seq,
2113                           &batadv_netlink_family, NLM_F_MULTI,
2114                           BATADV_CMD_GET_BLA_CLAIM);
2115         if (!hdr) {
2116                 ret = -ENOBUFS;
2117                 goto out;
2118         }
2119 
2120         genl_dump_check_consistent(cb, hdr);
2121 
2122         is_own = batadv_compare_eth(claim->backbone_gw->orig,
2123                                     primary_addr);
2124 
2125         spin_lock_bh(&claim->backbone_gw->crc_lock);
2126         backbone_crc = claim->backbone_gw->crc;
2127         spin_unlock_bh(&claim->backbone_gw->crc_lock);
2128 
2129         if (is_own)
2130                 if (nla_put_flag(msg, BATADV_ATTR_BLA_OWN)) {
2131                         genlmsg_cancel(msg, hdr);
2132                         goto out;
2133                 }
2134 
2135         if (nla_put(msg, BATADV_ATTR_BLA_ADDRESS, ETH_ALEN, claim->addr) ||
2136             nla_put_u16(msg, BATADV_ATTR_BLA_VID, claim->vid) ||
2137             nla_put(msg, BATADV_ATTR_BLA_BACKBONE, ETH_ALEN,
2138                     claim->backbone_gw->orig) ||
2139             nla_put_u16(msg, BATADV_ATTR_BLA_CRC,
2140                         backbone_crc)) {
2141                 genlmsg_cancel(msg, hdr);
2142                 goto out;
2143         }
2144 
2145         genlmsg_end(msg, hdr);
2146         ret = 0;
2147 
2148 out:
2149         return ret;
2150 }
2151 
2152 /**
2153  * batadv_bla_claim_dump_bucket() - dump one bucket of the claim table
2154  *  to a netlink socket
2155  * @msg: buffer for the message
2156  * @portid: netlink port
2157  * @cb: Control block containing additional options
2158  * @primary_if: primary interface
2159  * @hash: hash to dump
2160  * @bucket: bucket index to dump
2161  * @idx_skip: How many entries to skip
2162  *
2163  * Return: always 0.
2164  */
2165 static int
2166 batadv_bla_claim_dump_bucket(struct sk_buff *msg, u32 portid,
2167                              struct netlink_callback *cb,
2168                              struct batadv_hard_iface *primary_if,
2169                              struct batadv_hashtable *hash, unsigned int bucket,
2170                              int *idx_skip)
2171 {
2172         struct batadv_bla_claim *claim;
2173         int idx = 0;
2174         int ret = 0;
2175 
2176         spin_lock_bh(&hash->list_locks[bucket]);
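             /* descriptive note: the hash generation counter is encoded into
              * cb->seq (forced odd, hence non-zero) so that
              * genl_dump_check_consistent() can mark the dump as interrupted
              * if the table changes between dump rounds.
              */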
2177         cb->seq = atomic_read(&hash->generation) << 1 | 1;
2178 
2179         hlist_for_each_entry(claim, &hash->table[bucket], hash_entry) {
2180                 if (idx++ < *idx_skip)
2181                         continue;
2182 
2183                 ret = batadv_bla_claim_dump_entry(msg, portid, cb,
2184                                                   primary_if, claim);
2185                 if (ret) {
2186                         *idx_skip = idx - 1;
2187                         goto unlock;
2188                 }
2189         }
2190 
2191         *idx_skip = 0;
2192 unlock:
2193         spin_unlock_bh(&hash->list_locks[bucket]);
2194         return ret;
2195 }
2196 
2197 /**
2198  * batadv_bla_claim_dump() - dump claim table to a netlink socket
2199  * @msg: buffer for the message
2200  * @cb: callback structure containing arguments
2201  *
2202  * Return: message length.
2203  */
2204 int batadv_bla_claim_dump(struct sk_buff *msg, struct netlink_callback *cb)
2205 {
2206         struct batadv_hard_iface *primary_if = NULL;
2207         int portid = NETLINK_CB(cb->skb).portid;
2208         struct net *net = sock_net(cb->skb->sk);
2209         struct net_device *soft_iface;
2210         struct batadv_hashtable *hash;
2211         struct batadv_priv *bat_priv;
2212         int bucket = cb->args[0];
2213         int idx = cb->args[1];
2214         int ifindex;
2215         int ret = 0;
2216 
2217         ifindex = batadv_netlink_get_ifindex(cb->nlh,
2218                                              BATADV_ATTR_MESH_IFINDEX);
2219         if (!ifindex)
2220                 return -EINVAL;
2221 
2222         soft_iface = dev_get_by_index(net, ifindex);
2223         if (!soft_iface || !batadv_softif_is_valid(soft_iface)) {
2224                 ret = -ENODEV;
2225                 goto out;
2226         }
2227 
2228         bat_priv = netdev_priv(soft_iface);
2229         hash = bat_priv->bla.claim_hash;
2230 
2231         primary_if = batadv_primary_if_get_selected(bat_priv);
2232         if (!primary_if || primary_if->if_status != BATADV_IF_ACTIVE) {
2233                 ret = -ENOENT;
2234                 goto out;
2235         }
2236 
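             /* descriptive note: the dump is resumable; the current bucket and
              * the per-bucket index are carried across calls in cb->args[0]
              * and cb->args[1], so a dump that does not fit into one message
              * continues where it left off.
              */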
2237         while (bucket < hash->size) {
2238                 if (batadv_bla_claim_dump_bucket(msg, portid, cb, primary_if,
2239                                                  hash, bucket, &idx))
2240                         break;
2241                 bucket++;
2242         }
2243 
2244         cb->args[0] = bucket;
2245         cb->args[1] = idx;
2246 
2247         ret = msg->len;
2248 
2249 out:
2250         if (primary_if)
2251                 batadv_hardif_put(primary_if);
2252 
2253         if (soft_iface)
2254                 dev_put(soft_iface);
2255 
2256         return ret;
2257 }
2258 
2259 #ifdef CONFIG_BATMAN_ADV_DEBUGFS
2260 /**
2261  * batadv_bla_backbone_table_seq_print_text() - print the backbone table in a
2262  *  seq file
2263  * @seq: seq file to print on
2264  * @offset: not used
2265  *
2266  * Return: always 0
2267  */
2268 int batadv_bla_backbone_table_seq_print_text(struct seq_file *seq, void *offset)
2269 {
2270         struct net_device *net_dev = (struct net_device *)seq->private;
2271         struct batadv_priv *bat_priv = netdev_priv(net_dev);
2272         struct batadv_hashtable *hash = bat_priv->bla.backbone_hash;
2273         struct batadv_bla_backbone_gw *backbone_gw;
2274         struct batadv_hard_iface *primary_if;
2275         struct hlist_head *head;
2276         int secs, msecs;
2277         u16 backbone_crc;
2278         u32 i;
2279         bool is_own;
2280         u8 *primary_addr;
2281 
2282         primary_if = batadv_seq_print_text_primary_if_get(seq);
2283         if (!primary_if)
2284                 goto out;
2285 
2286         primary_addr = primary_if->net_dev->dev_addr;
2287         seq_printf(seq,
2288                    "Backbones announced for the mesh %s (orig %pM, group id %#.4x)\n",
2289                    net_dev->name, primary_addr,
2290                    ntohs(bat_priv->bla.claim_dest.group));
2291         seq_puts(seq, "   Originator           VID   last seen (CRC   )\n");
2292         for (i = 0; i < hash->size; i++) {
2293                 head = &hash->table[i];
2294 
2295                 rcu_read_lock();
2296                 hlist_for_each_entry_rcu(backbone_gw, head, hash_entry) {
2297                         msecs = jiffies_to_msecs(jiffies -
2298                                                  backbone_gw->lasttime);
2299                         secs = msecs / 1000;
2300                         msecs = msecs % 1000;
2301 
2302                         is_own = batadv_compare_eth(backbone_gw->orig,
2303                                                     primary_addr);
2304                         if (is_own)
2305                                 continue;
2306 
2307                         spin_lock_bh(&backbone_gw->crc_lock);
2308                         backbone_crc = backbone_gw->crc;
2309                         spin_unlock_bh(&backbone_gw->crc_lock);
2310 
2311                         seq_printf(seq, " * %pM on %5d %4i.%03is (%#.4x)\n",
2312                                    backbone_gw->orig,
2313                                    batadv_print_vid(backbone_gw->vid), secs,
2314                                    msecs, backbone_crc);
2315                 }
2316                 rcu_read_unlock();
2317         }
2318 out:
2319         if (primary_if)
2320                 batadv_hardif_put(primary_if);
2321         return 0;
2322 }
2323 #endif
2324 
2325 /**
2326  * batadv_bla_backbone_dump_entry() - dump one entry of the backbone table to a
2327  *  netlink socket
2328  * @msg: buffer for the message
2329  * @portid: netlink port
2330  * @cb: Control block containing additional options
2331  * @primary_if: primary interface
2332  * @backbone_gw: entry to dump
2333  *
2334  * Return: 0 or error code.
2335  */
2336 static int
2337 batadv_bla_backbone_dump_entry(struct sk_buff *msg, u32 portid,
2338                                struct netlink_callback *cb,
2339                                struct batadv_hard_iface *primary_if,
2340                                struct batadv_bla_backbone_gw *backbone_gw)
2341 {
2342         u8 *primary_addr = primary_if->net_dev->dev_addr;
2343         u16 backbone_crc;
2344         bool is_own;
2345         int msecs;
2346         void *hdr;
2347         int ret = -EINVAL;
2348 
2349         hdr = genlmsg_put(msg, portid, cb->nlh->nlmsg_seq,
2350                           &batadv_netlink_family, NLM_F_MULTI,
2351                           BATADV_CMD_GET_BLA_BACKBONE);
2352         if (!hdr) {
2353                 ret = -ENOBUFS;
2354                 goto out;
2355         }
2356 
2357         genl_dump_check_consistent(cb, hdr);
2358 
2359         is_own = batadv_compare_eth(backbone_gw->orig, primary_addr);
2360 
2361         spin_lock_bh(&backbone_gw->crc_lock);
2362         backbone_crc = backbone_gw->crc;
2363         spin_unlock_bh(&backbone_gw->crc_lock);
2364 
2365         msecs = jiffies_to_msecs(jiffies - backbone_gw->lasttime);
2366 
2367         if (is_own)
2368                 if (nla_put_flag(msg, BATADV_ATTR_BLA_OWN)) {
2369                         genlmsg_cancel(msg, hdr);
2370                         goto out;
2371                 }
2372 
2373         if (nla_put(msg, BATADV_ATTR_BLA_BACKBONE, ETH_ALEN,
2374                     backbone_gw->orig) ||
2375             nla_put_u16(msg, BATADV_ATTR_BLA_VID, backbone_gw->vid) ||
2376             nla_put_u16(msg, BATADV_ATTR_BLA_CRC,
2377                         backbone_crc) ||
2378             nla_put_u32(msg, BATADV_ATTR_LAST_SEEN_MSECS, msecs)) {
2379                 genlmsg_cancel(msg, hdr);
2380                 goto out;
2381         }
2382 
2383         genlmsg_end(msg, hdr);
2384         ret = 0;
2385 
2386 out:
2387         return ret;
2388 }
2389 
2390 /**
2391  * batadv_bla_backbone_dump_bucket() - dump one bucket of the backbone table to
2392  *  a netlink socket
2393  * @msg: buffer for the message
2394  * @portid: netlink port
2395  * @cb: Control block containing additional options
2396  * @primary_if: primary interface
2397  * @hash: hash to dump
2398  * @bucket: bucket index to dump
2399  * @idx_skip: How many entries to skip
2400  *
2401  * Return: always 0.
2402  */
2403 static int
2404 batadv_bla_backbone_dump_bucket(struct sk_buff *msg, u32 portid,
2405                                 struct netlink_callback *cb,
2406                                 struct batadv_hard_iface *primary_if,
2407                                 struct batadv_hashtable *hash,
2408                                 unsigned int bucket, int *idx_skip)
2409 {
2410         struct batadv_bla_backbone_gw *backbone_gw;
2411         int idx = 0;
2412         int ret = 0;
2413 
2414         spin_lock_bh(&hash->list_locks[bucket]);
2415         cb->seq = atomic_read(&hash->generation) << 1 | 1;
2416 
2417         hlist_for_each_entry(backbone_gw, &hash->table[bucket], hash_entry) {
2418                 if (idx++ < *idx_skip)
2419                         continue;
2420 
2421                 ret = batadv_bla_backbone_dump_entry(msg, portid, cb,
2422                                                      primary_if, backbone_gw);
2423                 if (ret) {
2424                         *idx_skip = idx - 1;
2425                         goto unlock;
2426                 }
2427         }
2428 
2429         *idx_skip = 0;
2430 unlock:
2431         spin_unlock_bh(&hash->list_locks[bucket]);
2432         return ret;
2433 }
2434 
2435 /**
2436  * batadv_bla_backbone_dump() - dump backbone table to a netlink socket
2437  * @msg: buffer for the message
2438  * @cb: callback structure containing arguments
2439  *
2440  * Return: message length.
2441  */
2442 int batadv_bla_backbone_dump(struct sk_buff *msg, struct netlink_callback *cb)
2443 {
2444         struct batadv_hard_iface *primary_if = NULL;
2445         int portid = NETLINK_CB(cb->skb).portid;
2446         struct net *net = sock_net(cb->skb->sk);
2447         struct net_device *soft_iface;
2448         struct batadv_hashtable *hash;
2449         struct batadv_priv *bat_priv;
2450         int bucket = cb->args[0];
2451         int idx = cb->args[1];
2452         int ifindex;
2453         int ret = 0;
2454 
2455         ifindex = batadv_netlink_get_ifindex(cb->nlh,
2456                                              BATADV_ATTR_MESH_IFINDEX);
2457         if (!ifindex)
2458                 return -EINVAL;
2459 
2460         soft_iface = dev_get_by_index(net, ifindex);
2461         if (!soft_iface || !batadv_softif_is_valid(soft_iface)) {
2462                 ret = -ENODEV;
2463                 goto out;
2464         }
2465 
2466         bat_priv = netdev_priv(soft_iface);
2467         hash = bat_priv->bla.backbone_hash;
2468 
2469         primary_if = batadv_primary_if_get_selected(bat_priv);
2470         if (!primary_if || primary_if->if_status != BATADV_IF_ACTIVE) {
2471                 ret = -ENOENT;
2472                 goto out;
2473         }
2474 
2475         while (bucket < hash->size) {
2476                 if (batadv_bla_backbone_dump_bucket(msg, portid, cb, primary_if,
2477                                                     hash, bucket, &idx))
2478                         break;
2479                 bucket++;
2480         }
2481 
2482         cb->args[0] = bucket;
2483         cb->args[1] = idx;
2484 
2485         ret = msg->len;
2486 
2487 out:
2488         if (primary_if)
2489                 batadv_hardif_put(primary_if);
2490 
2491         if (soft_iface)
2492                 dev_put(soft_iface);
2493 
2494         return ret;
2495 }
2496 
2497 #ifdef CONFIG_BATMAN_ADV_DAT
2498 /**
2499  * batadv_bla_check_claim() - check if address is claimed
2500  *
2501  * @bat_priv: the bat priv with all the soft interface information
2502  * @addr: mac address of which the claim status is checked
2503  * @vid: the VLAN ID
2504  *
2505  * addr is checked if this address is claimed by the local device itself.
2506  *
2507  * Return: true if bla is disabled or the mac is claimed by the device,
2508  * false if it is claimed by another gateway
2509  */
2510 bool batadv_bla_check_claim(struct batadv_priv *bat_priv,
2511                             u8 *addr, unsigned short vid)
2512 {
2513         struct batadv_bla_claim search_claim;
2514         struct batadv_bla_claim *claim = NULL;
2515         struct batadv_hard_iface *primary_if = NULL;
2516         bool ret = true;
2517 
2518         if (!atomic_read(&bat_priv->bridge_loop_avoidance))
2519                 return ret;
2520 
2521         primary_if = batadv_primary_if_get_selected(bat_priv);
2522         if (!primary_if)
2523                 return ret;
2524 
2525         /* First look if the mac address is claimed */
2526         ether_addr_copy(search_claim.addr, addr);
2527         search_claim.vid = vid;
2528 
2529         claim = batadv_claim_hash_find(bat_priv, &search_claim);
2530 
2531         /* If there is a claim and we are not owner of the claim,
2532          * return false.
2533          */
2534         if (claim) {
2535                 if (!batadv_compare_eth(claim->backbone_gw->orig,
2536                                         primary_if->net_dev->dev_addr))
2537                         ret = false;
2538                 batadv_claim_put(claim);
2539         }
2540 
2541         batadv_hardif_put(primary_if);
2542         return ret;
2543 }
2544 #endif