net/openvswitch/flow_table.c

DEFINITIONS

This source file includes the following definitions.
  1. range_n_bytes
  2. ovs_flow_mask_key
  3. ovs_flow_alloc
  4. ovs_flow_tbl_count
  5. flow_free
  6. rcu_free_flow_callback
  7. ovs_flow_free
  8. __table_instance_destroy
  9. table_instance_alloc
  10. ovs_flow_tbl_init
  11. flow_tbl_destroy_rcu_cb
  12. table_instance_destroy
  13. ovs_flow_tbl_destroy
  14. ovs_flow_tbl_dump_next
  15. find_bucket
  16. table_instance_insert
  17. ufid_table_instance_insert
  18. flow_table_copy_flows
  19. table_instance_rehash
  20. ovs_flow_tbl_flush
  21. flow_hash
  22. flow_key_start
  23. cmp_key
  24. flow_cmp_masked_key
  25. ovs_flow_cmp_unmasked_key
  26. masked_flow_lookup
  27. ovs_flow_tbl_lookup_stats
  28. ovs_flow_tbl_lookup
  29. ovs_flow_tbl_lookup_exact
  30. ufid_hash
  31. ovs_flow_cmp_ufid
  32. ovs_flow_cmp
  33. ovs_flow_tbl_lookup_ufid
  34. ovs_flow_tbl_num_masks
  35. table_instance_expand
  36. flow_mask_remove
  37. ovs_flow_tbl_remove
  38. mask_alloc
  39. mask_equal
  40. flow_mask_find
  41. flow_mask_insert
  42. flow_key_insert
  43. flow_ufid_insert
  44. ovs_flow_tbl_insert
  45. ovs_flow_init
  46. ovs_flow_exit

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2007-2014 Nicira, Inc.
 */

#include "flow.h"
#include "datapath.h"
#include "flow_netlink.h"
#include <linux/uaccess.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <net/llc_pdu.h>
#include <linux/kernel.h>
#include <linux/jhash.h>
#include <linux/jiffies.h>
#include <linux/llc.h>
#include <linux/module.h>
#include <linux/in.h>
#include <linux/rcupdate.h>
#include <linux/cpumask.h>
#include <linux/if_arp.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/sctp.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/icmp.h>
#include <linux/icmpv6.h>
#include <linux/rculist.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/ndisc.h>

#define TBL_MIN_BUCKETS         1024
#define REHASH_INTERVAL         (10 * 60 * HZ)

static struct kmem_cache *flow_cache;
struct kmem_cache *flow_stats_cache __read_mostly;

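/* Number of bytes covered by 'range' within a flow key. */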
static u16 range_n_bytes(const struct sw_flow_key_range *range)
{
        return range->end - range->start;
}

void ovs_flow_mask_key(struct sw_flow_key *dst, const struct sw_flow_key *src,
                       bool full, const struct sw_flow_mask *mask)
{
        int start = full ? 0 : mask->range.start;
        int len = full ? sizeof *dst : range_n_bytes(&mask->range);
        const long *m = (const long *)((const u8 *)&mask->key + start);
        const long *s = (const long *)((const u8 *)src + start);
        long *d = (long *)((u8 *)dst + start);
        int i;

        /* If 'full' is true then all of 'dst' is fully initialized. Otherwise,
         * if 'full' is false the memory outside of the 'mask->range' is left
         * uninitialized. This can be used as an optimization when further
         * operations on 'dst' only use contents within 'mask->range'.
         */
        for (i = 0; i < len; i += sizeof(long))
                *d++ = *s++ & *m++;
}

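/* Allocate a zeroed flow from the flow cache together with the default
 * stats node for CPU 0. Returns ERR_PTR(-ENOMEM) on failure.
 */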
struct sw_flow *ovs_flow_alloc(void)
{
        struct sw_flow *flow;
        struct sw_flow_stats *stats;

        flow = kmem_cache_zalloc(flow_cache, GFP_KERNEL);
        if (!flow)
                return ERR_PTR(-ENOMEM);

        flow->stats_last_writer = -1;

        /* Initialize the default stat node. */
        stats = kmem_cache_alloc_node(flow_stats_cache,
                                      GFP_KERNEL | __GFP_ZERO,
                                      node_online(0) ? 0 : NUMA_NO_NODE);
        if (!stats)
                goto err;

        spin_lock_init(&stats->lock);

        RCU_INIT_POINTER(flow->stats[0], stats);

        cpumask_set_cpu(0, &flow->cpu_used_mask);

        return flow;
err:
        kmem_cache_free(flow_cache, flow);
        return ERR_PTR(-ENOMEM);
}

int ovs_flow_tbl_count(const struct flow_table *table)
{
        return table->count;
}

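/* Release everything a flow owns: its unmasked key (for key-identified
 * flows), its actions and every allocated per-CPU stats node.
 */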
static void flow_free(struct sw_flow *flow)
{
        int cpu;

        if (ovs_identifier_is_key(&flow->id))
                kfree(flow->id.unmasked_key);
        if (flow->sf_acts)
                ovs_nla_free_flow_actions((struct sw_flow_actions __force *)flow->sf_acts);
        /* We open code this to make sure cpu 0 is always considered */
        for (cpu = 0; cpu < nr_cpu_ids; cpu = cpumask_next(cpu, &flow->cpu_used_mask))
                if (flow->stats[cpu])
                        kmem_cache_free(flow_stats_cache,
                                        (struct sw_flow_stats __force *)flow->stats[cpu]);
        kmem_cache_free(flow_cache, flow);
}

static void rcu_free_flow_callback(struct rcu_head *rcu)
{
        struct sw_flow *flow = container_of(rcu, struct sw_flow, rcu);

        flow_free(flow);
}

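/* Free 'flow', either immediately or, when 'deferred' is set, after an
 * RCU grace period.
 */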
void ovs_flow_free(struct sw_flow *flow, bool deferred)
{
        if (!flow)
                return;

        if (deferred)
                call_rcu(&flow->rcu, rcu_free_flow_callback);
        else
                flow_free(flow);
}

static void __table_instance_destroy(struct table_instance *ti)
{
        kvfree(ti->buckets);
        kfree(ti);
}

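/* Allocate a table instance with 'new_size' hash buckets and a fresh
 * random hash seed. Returns NULL if allocation fails.
 */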
static struct table_instance *table_instance_alloc(int new_size)
{
        struct table_instance *ti = kmalloc(sizeof(*ti), GFP_KERNEL);
        int i;

        if (!ti)
                return NULL;

        ti->buckets = kvmalloc_array(new_size, sizeof(struct hlist_head),
                                     GFP_KERNEL);
        if (!ti->buckets) {
                kfree(ti);
                return NULL;
        }

        for (i = 0; i < new_size; i++)
                INIT_HLIST_HEAD(&ti->buckets[i]);

        ti->n_buckets = new_size;
        ti->node_ver = 0;
        ti->keep_flows = false;
        get_random_bytes(&ti->hash_seed, sizeof(u32));

        return ti;
}

int ovs_flow_tbl_init(struct flow_table *table)
{
        struct table_instance *ti, *ufid_ti;

        ti = table_instance_alloc(TBL_MIN_BUCKETS);
        if (!ti)
                return -ENOMEM;

        ufid_ti = table_instance_alloc(TBL_MIN_BUCKETS);
        if (!ufid_ti)
                goto free_ti;

        rcu_assign_pointer(table->ti, ti);
        rcu_assign_pointer(table->ufid_ti, ufid_ti);
        INIT_LIST_HEAD(&table->mask_list);
        table->last_rehash = jiffies;
        table->count = 0;
        table->ufid_count = 0;
        return 0;

free_ti:
        __table_instance_destroy(ti);
        return -ENOMEM;
}

static void flow_tbl_destroy_rcu_cb(struct rcu_head *rcu)
{
        struct table_instance *ti = container_of(rcu, struct table_instance, rcu);

        __table_instance_destroy(ti);
}

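/* Free 'ti' and 'ufid_ti' and, unless 'ti->keep_flows' is set, all the
 * flows they contain. With 'deferred', freeing waits for an RCU grace
 * period.
 */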
static void table_instance_destroy(struct table_instance *ti,
                                   struct table_instance *ufid_ti,
                                   bool deferred)
{
        int i;

        if (!ti)
                return;

        BUG_ON(!ufid_ti);
        if (ti->keep_flows)
                goto skip_flows;

        for (i = 0; i < ti->n_buckets; i++) {
                struct sw_flow *flow;
                struct hlist_head *head = &ti->buckets[i];
                struct hlist_node *n;
                int ver = ti->node_ver;
                int ufid_ver = ufid_ti->node_ver;

                hlist_for_each_entry_safe(flow, n, head, flow_table.node[ver]) {
                        hlist_del_rcu(&flow->flow_table.node[ver]);
                        if (ovs_identifier_is_ufid(&flow->id))
                                hlist_del_rcu(&flow->ufid_table.node[ufid_ver]);
                        ovs_flow_free(flow, deferred);
                }
        }

skip_flows:
        if (deferred) {
                call_rcu(&ti->rcu, flow_tbl_destroy_rcu_cb);
                call_rcu(&ufid_ti->rcu, flow_tbl_destroy_rcu_cb);
        } else {
                __table_instance_destroy(ti);
                __table_instance_destroy(ufid_ti);
        }
}

/* No need for locking; this function is called from an RCU callback or
 * the error path.
 */
void ovs_flow_tbl_destroy(struct flow_table *table)
{
        struct table_instance *ti = rcu_dereference_raw(table->ti);
        struct table_instance *ufid_ti = rcu_dereference_raw(table->ufid_ti);

        table_instance_destroy(ti, ufid_ti, false);
}

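/* Return the next flow in a dump, resuming at position '*bucket' and
 * '*last' (entry within the bucket), and advance the cursor. Returns
 * NULL once the table has been fully traversed.
 */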
struct sw_flow *ovs_flow_tbl_dump_next(struct table_instance *ti,
                                       u32 *bucket, u32 *last)
{
        struct sw_flow *flow;
        struct hlist_head *head;
        int ver;
        int i;

        ver = ti->node_ver;
        while (*bucket < ti->n_buckets) {
                i = 0;
                head = &ti->buckets[*bucket];
                hlist_for_each_entry_rcu(flow, head, flow_table.node[ver]) {
                        if (i < *last) {
                                i++;
                                continue;
                        }
                        *last = i + 1;
                        return flow;
                }
                (*bucket)++;
                *last = 0;
        }

        return NULL;
}

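/* Map 'hash' to a bucket, mixing in the per-instance random seed so that
 * flows distribute differently in each table instance.
 */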
static struct hlist_head *find_bucket(struct table_instance *ti, u32 hash)
{
        hash = jhash_1word(hash, ti->hash_seed);
        return &ti->buckets[hash & (ti->n_buckets - 1)];
}

static void table_instance_insert(struct table_instance *ti,
                                  struct sw_flow *flow)
{
        struct hlist_head *head;

        head = find_bucket(ti, flow->flow_table.hash);
        hlist_add_head_rcu(&flow->flow_table.node[ti->node_ver], head);
}

static void ufid_table_instance_insert(struct table_instance *ti,
                                       struct sw_flow *flow)
{
        struct hlist_head *head;

        head = find_bucket(ti, flow->ufid_table.hash);
        hlist_add_head_rcu(&flow->ufid_table.node[ti->node_ver], head);
}

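/* Relink every flow from 'old' into 'new' under the flipped node
 * version, then mark 'old' so that destroying it does not free the
 * flows it still references.
 */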
static void flow_table_copy_flows(struct table_instance *old,
                                  struct table_instance *new, bool ufid)
{
        int old_ver;
        int i;

        old_ver = old->node_ver;
        new->node_ver = !old_ver;

        /* Insert in new table. */
        for (i = 0; i < old->n_buckets; i++) {
                struct sw_flow *flow;
                struct hlist_head *head = &old->buckets[i];

                if (ufid)
                        hlist_for_each_entry(flow, head,
                                             ufid_table.node[old_ver])
                                ufid_table_instance_insert(new, flow);
                else
                        hlist_for_each_entry(flow, head,
                                             flow_table.node[old_ver])
                                table_instance_insert(new, flow);
        }

        old->keep_flows = true;
}

static struct table_instance *table_instance_rehash(struct table_instance *ti,
                                                    int n_buckets, bool ufid)
{
        struct table_instance *new_ti;

        new_ti = table_instance_alloc(n_buckets);
        if (!new_ti)
                return NULL;

        flow_table_copy_flows(ti, new_ti, ufid);

        return new_ti;
}

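/* Replace both table instances with empty, minimum-sized ones; the old
 * instances and their flows are freed after an RCU grace period.
 */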
int ovs_flow_tbl_flush(struct flow_table *flow_table)
{
        struct table_instance *old_ti, *new_ti;
        struct table_instance *old_ufid_ti, *new_ufid_ti;

        new_ti = table_instance_alloc(TBL_MIN_BUCKETS);
        if (!new_ti)
                return -ENOMEM;
        new_ufid_ti = table_instance_alloc(TBL_MIN_BUCKETS);
        if (!new_ufid_ti)
                goto err_free_ti;

        old_ti = ovsl_dereference(flow_table->ti);
        old_ufid_ti = ovsl_dereference(flow_table->ufid_ti);

        rcu_assign_pointer(flow_table->ti, new_ti);
        rcu_assign_pointer(flow_table->ufid_ti, new_ufid_ti);
        flow_table->last_rehash = jiffies;
        flow_table->count = 0;
        flow_table->ufid_count = 0;

        table_instance_destroy(old_ti, old_ufid_ti, true);
        return 0;

err_free_ti:
        __table_instance_destroy(new_ti);
        return -ENOMEM;
}

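/* Hash the bytes of 'key' that fall within 'range' as an array of u32s. */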
static u32 flow_hash(const struct sw_flow_key *key,
                     const struct sw_flow_key_range *range)
{
        int key_start = range->start;
        int key_end = range->end;
        const u32 *hash_key = (const u32 *)((const u8 *)key + key_start);
        int hash_u32s = (key_end - key_start) >> 2;

        /* Make sure the number of hash bytes is a multiple of u32. */
        BUILD_BUG_ON(sizeof(long) % sizeof(u32));

        return jhash2(hash_key, hash_u32s, 0);
}

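/* Return the offset at which key comparison should start: zero when the
 * flow carries tunnel metadata, otherwise the start of the 'phy' fields
 * rounded down to a long boundary.
 */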
static int flow_key_start(const struct sw_flow_key *key)
{
        if (key->tun_proto)
                return 0;
        else
                return rounddown(offsetof(struct sw_flow_key, phy),
                                 sizeof(long));
}

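/* Compare two flow keys over [key_start, key_end) in long-sized chunks,
 * OR-ing the differences together so equality is decided without
 * branching on each word.
 */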
static bool cmp_key(const struct sw_flow_key *key1,
                    const struct sw_flow_key *key2,
                    int key_start, int key_end)
{
        const long *cp1 = (const long *)((const u8 *)key1 + key_start);
        const long *cp2 = (const long *)((const u8 *)key2 + key_start);
        long diffs = 0;
        int i;

        for (i = key_start; i < key_end; i += sizeof(long))
                diffs |= *cp1++ ^ *cp2++;

        return diffs == 0;
}

static bool flow_cmp_masked_key(const struct sw_flow *flow,
                                const struct sw_flow_key *key,
                                const struct sw_flow_key_range *range)
{
        return cmp_key(&flow->key, key, range->start, range->end);
}

static bool ovs_flow_cmp_unmasked_key(const struct sw_flow *flow,
                                      const struct sw_flow_match *match)
{
        struct sw_flow_key *key = match->key;
        int key_start = flow_key_start(key);
        int key_end = match->range.end;

        BUG_ON(ovs_identifier_is_ufid(&flow->id));
        return cmp_key(flow->id.unmasked_key, key, key_start, key_end);
}

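/* Look up 'unmasked' under a single 'mask': apply the mask, hash the
 * masked range and scan the matching hash bucket.
 */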
static struct sw_flow *masked_flow_lookup(struct table_instance *ti,
                                          const struct sw_flow_key *unmasked,
                                          const struct sw_flow_mask *mask)
{
        struct sw_flow *flow;
        struct hlist_head *head;
        u32 hash;
        struct sw_flow_key masked_key;

        ovs_flow_mask_key(&masked_key, unmasked, false, mask);
        hash = flow_hash(&masked_key, &mask->range);
        head = find_bucket(ti, hash);
        hlist_for_each_entry_rcu(flow, head, flow_table.node[ti->node_ver]) {
                if (flow->mask == mask && flow->flow_table.hash == hash &&
                    flow_cmp_masked_key(flow, &masked_key, &mask->range))
                        return flow;
        }
        return NULL;
}

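/* Look up 'key' by trying each mask on the mask list in turn.
 * '*n_mask_hit' returns the number of masks tried, a measure of the
 * cost of this lookup.
 */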
struct sw_flow *ovs_flow_tbl_lookup_stats(struct flow_table *tbl,
                                          const struct sw_flow_key *key,
                                          u32 *n_mask_hit)
{
        struct table_instance *ti = rcu_dereference_ovsl(tbl->ti);
        struct sw_flow_mask *mask;
        struct sw_flow *flow;

        *n_mask_hit = 0;
        list_for_each_entry_rcu(mask, &tbl->mask_list, list) {
                (*n_mask_hit)++;
                flow = masked_flow_lookup(ti, key, mask);
                if (flow)  /* Found */
                        return flow;
        }
        return NULL;
}

struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *tbl,
                                    const struct sw_flow_key *key)
{
        u32 __always_unused n_mask_hit;

        return ovs_flow_tbl_lookup_stats(tbl, key, &n_mask_hit);
}

struct sw_flow *ovs_flow_tbl_lookup_exact(struct flow_table *tbl,
                                          const struct sw_flow_match *match)
{
        struct table_instance *ti = rcu_dereference_ovsl(tbl->ti);
        struct sw_flow_mask *mask;
        struct sw_flow *flow;

        /* Always called under ovs-mutex. */
        list_for_each_entry(mask, &tbl->mask_list, list) {
                flow = masked_flow_lookup(ti, match->key, mask);
                if (flow && ovs_identifier_is_key(&flow->id) &&
                    ovs_flow_cmp_unmasked_key(flow, match))
                        return flow;
        }
        return NULL;
}

static u32 ufid_hash(const struct sw_flow_id *sfid)
{
        return jhash(sfid->ufid, sfid->ufid_len, 0);
}

static bool ovs_flow_cmp_ufid(const struct sw_flow *flow,
                              const struct sw_flow_id *sfid)
{
        if (flow->id.ufid_len != sfid->ufid_len)
                return false;

        return !memcmp(flow->id.ufid, sfid->ufid, sfid->ufid_len);
}

bool ovs_flow_cmp(const struct sw_flow *flow, const struct sw_flow_match *match)
{
        if (ovs_identifier_is_ufid(&flow->id))
                return flow_cmp_masked_key(flow, match->key, &match->range);

        return ovs_flow_cmp_unmasked_key(flow, match);
}

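/* Look up a flow by its unique flow identifier (UFID) in the dedicated
 * UFID table instance.
 */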
struct sw_flow *ovs_flow_tbl_lookup_ufid(struct flow_table *tbl,
                                         const struct sw_flow_id *ufid)
{
        struct table_instance *ti = rcu_dereference_ovsl(tbl->ufid_ti);
        struct sw_flow *flow;
        struct hlist_head *head;
        u32 hash;

        hash = ufid_hash(ufid);
        head = find_bucket(ti, hash);
        hlist_for_each_entry_rcu(flow, head, ufid_table.node[ti->node_ver]) {
                if (flow->ufid_table.hash == hash &&
                    ovs_flow_cmp_ufid(flow, ufid))
                        return flow;
        }
        return NULL;
}

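/* Count the masks currently on the table's mask list. */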
int ovs_flow_tbl_num_masks(const struct flow_table *table)
{
        struct sw_flow_mask *mask;
        int num = 0;

        list_for_each_entry(mask, &table->mask_list, list)
                num++;

        return num;
}

static struct table_instance *table_instance_expand(struct table_instance *ti,
                                                    bool ufid)
{
        return table_instance_rehash(ti, ti->n_buckets * 2, ufid);
}

/* Remove 'mask' from the mask list, if it is not needed any more. */
static void flow_mask_remove(struct flow_table *tbl, struct sw_flow_mask *mask)
{
        if (mask) {
                /* ovs-lock is required to protect mask-refcount and
                 * mask list.
                 */
                ASSERT_OVSL();
                BUG_ON(!mask->ref_count);
                mask->ref_count--;

                if (!mask->ref_count) {
                        list_del_rcu(&mask->list);
                        kfree_rcu(mask, rcu);
                }
        }
}

/* Must be called with OVS mutex held. */
void ovs_flow_tbl_remove(struct flow_table *table, struct sw_flow *flow)
{
        struct table_instance *ti = ovsl_dereference(table->ti);
        struct table_instance *ufid_ti = ovsl_dereference(table->ufid_ti);

        BUG_ON(table->count == 0);
        hlist_del_rcu(&flow->flow_table.node[ti->node_ver]);
        table->count--;
        if (ovs_identifier_is_ufid(&flow->id)) {
                hlist_del_rcu(&flow->ufid_table.node[ufid_ti->node_ver]);
                table->ufid_count--;
        }

        /* RCU delete the mask. 'flow->mask' is not NULLed, as it should be
         * accessible as long as the RCU read lock is held.
         */
        flow_mask_remove(table, flow->mask);
}

static struct sw_flow_mask *mask_alloc(void)
{
        struct sw_flow_mask *mask;

        mask = kmalloc(sizeof(*mask), GFP_KERNEL);
        if (mask)
                mask->ref_count = 1;

        return mask;
}

static bool mask_equal(const struct sw_flow_mask *a,
                       const struct sw_flow_mask *b)
{
        const u8 *a_ = (const u8 *)&a->key + a->range.start;
        const u8 *b_ = (const u8 *)&b->key + b->range.start;

        return (a->range.end == b->range.end)
                && (a->range.start == b->range.start)
                && (memcmp(a_, b_, range_n_bytes(&a->range)) == 0);
}

static struct sw_flow_mask *flow_mask_find(const struct flow_table *tbl,
                                           const struct sw_flow_mask *mask)
{
        struct list_head *ml;

        list_for_each(ml, &tbl->mask_list) {
                struct sw_flow_mask *m;

                m = container_of(ml, struct sw_flow_mask, list);
                if (mask_equal(mask, m))
                        return m;
        }

        return NULL;
}

/* Add 'mask' into the mask list, if it is not already there. */
static int flow_mask_insert(struct flow_table *tbl, struct sw_flow *flow,
                            const struct sw_flow_mask *new)
{
        struct sw_flow_mask *mask;

        mask = flow_mask_find(tbl, new);
        if (!mask) {
                /* Allocate a new mask if none exists. */
                mask = mask_alloc();
                if (!mask)
                        return -ENOMEM;
                mask->key = new->key;
                mask->range = new->range;
                list_add_rcu(&mask->list, &tbl->mask_list);
        } else {
                BUG_ON(!mask->ref_count);
                mask->ref_count++;
        }

        flow->mask = mask;
        return 0;
}

/* Must be called with OVS mutex held. */
static void flow_key_insert(struct flow_table *table, struct sw_flow *flow)
{
        struct table_instance *new_ti = NULL;
        struct table_instance *ti;

        flow->flow_table.hash = flow_hash(&flow->key, &flow->mask->range);
        ti = ovsl_dereference(table->ti);
        table_instance_insert(ti, flow);
        table->count++;

        /* Expand table, if necessary, to make room. */
        if (table->count > ti->n_buckets)
                new_ti = table_instance_expand(ti, false);
        else if (time_after(jiffies, table->last_rehash + REHASH_INTERVAL))
                new_ti = table_instance_rehash(ti, ti->n_buckets, false);

        if (new_ti) {
                rcu_assign_pointer(table->ti, new_ti);
                call_rcu(&ti->rcu, flow_tbl_destroy_rcu_cb);
                table->last_rehash = jiffies;
        }
}

/* Must be called with OVS mutex held. */
static void flow_ufid_insert(struct flow_table *table, struct sw_flow *flow)
{
        struct table_instance *ti;

        flow->ufid_table.hash = ufid_hash(&flow->id);
        ti = ovsl_dereference(table->ufid_ti);
        ufid_table_instance_insert(ti, flow);
        table->ufid_count++;

        /* Expand table, if necessary, to make room. */
        if (table->ufid_count > ti->n_buckets) {
                struct table_instance *new_ti;

                new_ti = table_instance_expand(ti, true);
                if (new_ti) {
                        rcu_assign_pointer(table->ufid_ti, new_ti);
                        call_rcu(&ti->rcu, flow_tbl_destroy_rcu_cb);
                }
        }
}

/* Must be called with OVS mutex held. */
int ovs_flow_tbl_insert(struct flow_table *table, struct sw_flow *flow,
                        const struct sw_flow_mask *mask)
{
        int err;

        err = flow_mask_insert(table, flow, mask);
        if (err)
                return err;
        flow_key_insert(table, flow);
        if (ovs_identifier_is_ufid(&flow->id))
                flow_ufid_insert(table, flow);

        return 0;
}

/* Initializes the flow module.
 * Returns zero if successful or a negative error code.
 */
int ovs_flow_init(void)
{
        BUILD_BUG_ON(__alignof__(struct sw_flow_key) % __alignof__(long));
        BUILD_BUG_ON(sizeof(struct sw_flow_key) % sizeof(long));

        flow_cache = kmem_cache_create("sw_flow", sizeof(struct sw_flow)
                                       + (nr_cpu_ids
                                          * sizeof(struct sw_flow_stats *)),
                                       0, 0, NULL);
        if (flow_cache == NULL)
                return -ENOMEM;

        flow_stats_cache
                = kmem_cache_create("sw_flow_stats", sizeof(struct sw_flow_stats),
                                    0, SLAB_HWCACHE_ALIGN, NULL);
        if (flow_stats_cache == NULL) {
                kmem_cache_destroy(flow_cache);
                flow_cache = NULL;
                return -ENOMEM;
        }

        return 0;
}

/* Uninitializes the flow module. */
void ovs_flow_exit(void)
{
        kmem_cache_destroy(flow_stats_cache);
        kmem_cache_destroy(flow_cache);
}
