/*
 * Copyright (c) 2007-2014 Nicira, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */

#include "flow.h"
#include "datapath.h"
#include <linux/uaccess.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <net/llc_pdu.h>
#include <linux/kernel.h>
#include <linux/jhash.h>
#include <linux/jiffies.h>
#include <linux/llc.h>
#include <linux/module.h>
#include <linux/in.h>
#include <linux/rcupdate.h>
#include <linux/if_arp.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/sctp.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/icmp.h>
#include <linux/icmpv6.h>
#include <linux/rculist.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/ndisc.h>

#define TBL_MIN_BUCKETS		1024
#define REHASH_INTERVAL		(10 * 60 * HZ)

static struct kmem_cache *flow_cache;
struct kmem_cache *flow_stats_cache __read_mostly;

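/* Returns the number of bytes that 'range' spans within a flow key,
 * i.e. the length of the region that masking and comparison operate on.
 */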
static u16 range_n_bytes(const struct sw_flow_key_range *range)
{
	return range->end - range->start;
}

void ovs_flow_mask_key(struct sw_flow_key *dst, const struct sw_flow_key *src,
		       bool full, const struct sw_flow_mask *mask)
{
	int start = full ? 0 : mask->range.start;
	int len = full ? sizeof(*dst) : range_n_bytes(&mask->range);
	const long *m = (const long *)((const u8 *)&mask->key + start);
	const long *s = (const long *)((const u8 *)src + start);
	long *d = (long *)((u8 *)dst + start);
	int i;

	/* If 'full' is true then all of 'dst' is fully initialized. Otherwise,
	 * if 'full' is false the memory outside of the 'mask->range' is left
	 * uninitialized. This can be used as an optimization when further
	 * operations on 'dst' only use contents within 'mask->range'.
	 */
	for (i = 0; i < len; i += sizeof(long))
		*d++ = *s++ & *m++;
}

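/* Allocates a new flow with no actions, mask or identifier set, and with
 * a zeroed stats node allocated on NUMA node 0 when it is online; stats
 * pointers for all other nodes start out NULL.
 */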
struct sw_flow *ovs_flow_alloc(void)
{
	struct sw_flow *flow;
	struct flow_stats *stats;
	int node;

	flow = kmem_cache_alloc(flow_cache, GFP_KERNEL);
	if (!flow)
		return ERR_PTR(-ENOMEM);

	flow->sf_acts = NULL;
	flow->mask = NULL;
	flow->id.unmasked_key = NULL;
	flow->id.ufid_len = 0;
	flow->stats_last_writer = NUMA_NO_NODE;

	/* Initialize the default stat node. */
	stats = kmem_cache_alloc_node(flow_stats_cache,
				      GFP_KERNEL | __GFP_ZERO,
				      node_online(0) ? 0 : NUMA_NO_NODE);
	if (!stats)
		goto err;

	spin_lock_init(&stats->lock);

	RCU_INIT_POINTER(flow->stats[0], stats);

	for_each_node(node)
		if (node != 0)
			RCU_INIT_POINTER(flow->stats[node], NULL);

	return flow;
err:
	kmem_cache_free(flow_cache, flow);
	return ERR_PTR(-ENOMEM);
}

int ovs_flow_tbl_count(const struct flow_table *table)
{
	return table->count;
}

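/* Allocates a flex_array of 'n_buckets' hash buckets and initializes
 * each one as an empty hlist; returns NULL if allocation fails.
 */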
static struct flex_array *alloc_buckets(unsigned int n_buckets)
{
	struct flex_array *buckets;
	int i, err;

	buckets = flex_array_alloc(sizeof(struct hlist_head),
				   n_buckets, GFP_KERNEL);
	if (!buckets)
		return NULL;

	err = flex_array_prealloc(buckets, 0, n_buckets, GFP_KERNEL);
	if (err) {
		flex_array_free(buckets);
		return NULL;
	}

	for (i = 0; i < n_buckets; i++)
		INIT_HLIST_HEAD((struct hlist_head *)
					flex_array_get(buckets, i));

	return buckets;
}

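/* Frees a flow along with everything it owns: the unmasked key (for
 * key-identified flows), the actions and any per-node statistics.
 * The flow must no longer be reachable by RCU readers.
 */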
static void flow_free(struct sw_flow *flow)
{
	int node;

	if (ovs_identifier_is_key(&flow->id))
		kfree(flow->id.unmasked_key);
	kfree((struct sw_flow_actions __force *)flow->sf_acts);
	for_each_node(node)
		if (flow->stats[node])
			kmem_cache_free(flow_stats_cache,
					(struct flow_stats __force *)flow->stats[node]);
	kmem_cache_free(flow_cache, flow);
}

static void rcu_free_flow_callback(struct rcu_head *rcu)
{
	struct sw_flow *flow = container_of(rcu, struct sw_flow, rcu);

	flow_free(flow);
}

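/* Frees 'flow' immediately, or after an RCU grace period when 'deferred'
 * is true.
 */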
void ovs_flow_free(struct sw_flow *flow, bool deferred)
{
	if (!flow)
		return;

	if (deferred)
		call_rcu(&flow->rcu, rcu_free_flow_callback);
	else
		flow_free(flow);
}

static void free_buckets(struct flex_array *buckets)
{
	flex_array_free(buckets);
}

static void __table_instance_destroy(struct table_instance *ti)
{
	free_buckets(ti->buckets);
	kfree(ti);
}

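/* Allocates a table instance with 'new_size' buckets and a fresh random
 * hash seed; returns NULL if allocation fails.
 */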
static struct table_instance *table_instance_alloc(int new_size)
{
	struct table_instance *ti = kmalloc(sizeof(*ti), GFP_KERNEL);

	if (!ti)
		return NULL;

	ti->buckets = alloc_buckets(new_size);

	if (!ti->buckets) {
		kfree(ti);
		return NULL;
	}
	ti->n_buckets = new_size;
	ti->node_ver = 0;
	ti->keep_flows = false;
	get_random_bytes(&ti->hash_seed, sizeof(u32));

	return ti;
}

int ovs_flow_tbl_init(struct flow_table *table)
{
	struct table_instance *ti, *ufid_ti;

	ti = table_instance_alloc(TBL_MIN_BUCKETS);

	if (!ti)
		return -ENOMEM;

	ufid_ti = table_instance_alloc(TBL_MIN_BUCKETS);
	if (!ufid_ti)
		goto free_ti;

	rcu_assign_pointer(table->ti, ti);
	rcu_assign_pointer(table->ufid_ti, ufid_ti);
	INIT_LIST_HEAD(&table->mask_list);
	table->last_rehash = jiffies;
	table->count = 0;
	table->ufid_count = 0;
	return 0;

free_ti:
	__table_instance_destroy(ti);
	return -ENOMEM;
}

static void flow_tbl_destroy_rcu_cb(struct rcu_head *rcu)
{
	struct table_instance *ti = container_of(rcu, struct table_instance, rcu);

	__table_instance_destroy(ti);
}

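/* Destroys 'ti' and 'ufid_ti', freeing the flows they hold unless
 * 'keep_flows' is set, which means the flows have already been moved
 * into a newer instance by a rehash.
 */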
static void table_instance_destroy(struct table_instance *ti,
				   struct table_instance *ufid_ti,
				   bool deferred)
{
	int i;

	if (!ti)
		return;

	BUG_ON(!ufid_ti);
	if (ti->keep_flows)
		goto skip_flows;

	for (i = 0; i < ti->n_buckets; i++) {
		struct sw_flow *flow;
		struct hlist_head *head = flex_array_get(ti->buckets, i);
		struct hlist_node *n;
		int ver = ti->node_ver;
		int ufid_ver = ufid_ti->node_ver;

		hlist_for_each_entry_safe(flow, n, head, flow_table.node[ver]) {
			hlist_del_rcu(&flow->flow_table.node[ver]);
			if (ovs_identifier_is_ufid(&flow->id))
				hlist_del_rcu(&flow->ufid_table.node[ufid_ver]);
			ovs_flow_free(flow, deferred);
		}
	}

skip_flows:
	if (deferred) {
		call_rcu(&ti->rcu, flow_tbl_destroy_rcu_cb);
		call_rcu(&ufid_ti->rcu, flow_tbl_destroy_rcu_cb);
	} else {
		__table_instance_destroy(ti);
		__table_instance_destroy(ufid_ti);
	}
}

/* No need for locking; this function is called only from an RCU callback
 * or from an error path.
 */
void ovs_flow_tbl_destroy(struct flow_table *table)
{
	struct table_instance *ti = rcu_dereference_raw(table->ti);
	struct table_instance *ufid_ti = rcu_dereference_raw(table->ufid_ti);

	table_instance_destroy(ti, ufid_ti, false);
}

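/* Returns the flow at cursor position ('bucket', 'last') and advances
 * the cursor, so that repeated calls walk every flow in 'ti' exactly
 * once; returns NULL once the table has been fully traversed.
 */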
struct sw_flow *ovs_flow_tbl_dump_next(struct table_instance *ti,
				       u32 *bucket, u32 *last)
{
	struct sw_flow *flow;
	struct hlist_head *head;
	int ver;
	int i;

	ver = ti->node_ver;
	while (*bucket < ti->n_buckets) {
		i = 0;
		head = flex_array_get(ti->buckets, *bucket);
		hlist_for_each_entry_rcu(flow, head, flow_table.node[ver]) {
			if (i < *last) {
				i++;
				continue;
			}
			*last = i + 1;
			return flow;
		}
		(*bucket)++;
		*last = 0;
	}

	return NULL;
}

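/* Maps 'hash' to one of the instance's buckets, mixing in the
 * per-instance random seed so bucket placement differs between table
 * instances.
 */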
static struct hlist_head *find_bucket(struct table_instance *ti, u32 hash)
{
	hash = jhash_1word(hash, ti->hash_seed);
	return flex_array_get(ti->buckets,
				(hash & (ti->n_buckets - 1)));
}

static void table_instance_insert(struct table_instance *ti,
				  struct sw_flow *flow)
{
	struct hlist_head *head;

	head = find_bucket(ti, flow->flow_table.hash);
	hlist_add_head_rcu(&flow->flow_table.node[ti->node_ver], head);
}

static void ufid_table_instance_insert(struct table_instance *ti,
				       struct sw_flow *flow)
{
	struct hlist_head *head;

	head = find_bucket(ti, flow->ufid_table.hash);
	hlist_add_head_rcu(&flow->ufid_table.node[ti->node_ver], head);
}

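/* Re-links every flow in 'old' into 'new' using the alternate set of
 * hlist nodes ('node_ver' flips between 0 and 1), then marks 'old' so
 * its flows are not freed when the old instance is destroyed.
 */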
static void flow_table_copy_flows(struct table_instance *old,
				  struct table_instance *new, bool ufid)
{
	int old_ver;
	int i;

	old_ver = old->node_ver;
	new->node_ver = !old_ver;

	/* Insert in new table. */
	for (i = 0; i < old->n_buckets; i++) {
		struct sw_flow *flow;
		struct hlist_head *head;

		head = flex_array_get(old->buckets, i);

		if (ufid)
			hlist_for_each_entry(flow, head,
					     ufid_table.node[old_ver])
				ufid_table_instance_insert(new, flow);
		else
			hlist_for_each_entry(flow, head,
					     flow_table.node[old_ver])
				table_instance_insert(new, flow);
	}

	old->keep_flows = true;
}

static struct table_instance *table_instance_rehash(struct table_instance *ti,
						    int n_buckets, bool ufid)
{
	struct table_instance *new_ti;

	new_ti = table_instance_alloc(n_buckets);
	if (!new_ti)
		return NULL;

	flow_table_copy_flows(ti, new_ti, ufid);

	return new_ti;
}

int ovs_flow_tbl_flush(struct flow_table *flow_table)
{
	struct table_instance *old_ti, *new_ti;
	struct table_instance *old_ufid_ti, *new_ufid_ti;

	new_ti = table_instance_alloc(TBL_MIN_BUCKETS);
	if (!new_ti)
		return -ENOMEM;
	new_ufid_ti = table_instance_alloc(TBL_MIN_BUCKETS);
	if (!new_ufid_ti)
		goto err_free_ti;

	old_ti = ovsl_dereference(flow_table->ti);
	old_ufid_ti = ovsl_dereference(flow_table->ufid_ti);

	rcu_assign_pointer(flow_table->ti, new_ti);
	rcu_assign_pointer(flow_table->ufid_ti, new_ufid_ti);
	flow_table->last_rehash = jiffies;
	flow_table->count = 0;
	flow_table->ufid_count = 0;

	table_instance_destroy(old_ti, old_ufid_ti, true);
	return 0;

err_free_ti:
	__table_instance_destroy(new_ti);
	return -ENOMEM;
}

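/* Hashes the bytes of 'key' that fall within 'range', treating them as
 * an array of u32s.
 */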
static u32 flow_hash(const struct sw_flow_key *key,
		     const struct sw_flow_key_range *range)
{
	int key_start = range->start;
	int key_end = range->end;
	const u32 *hash_key = (const u32 *)((const u8 *)key + key_start);
	int hash_u32s = (key_end - key_start) >> 2;

	/* Make sure the number of hash bytes is a multiple of u32. */
	BUILD_BUG_ON(sizeof(long) % sizeof(u32));

	return jhash2(hash_key, hash_u32s, 0);
}

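/* Returns the byte offset at which key comparison should start: zero for
 * tunneled flows, so that the tunnel key is included, otherwise just
 * past the tunnel key, rounded down to a long boundary.
 */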
static int flow_key_start(const struct sw_flow_key *key)
{
	if (key->tun_key.ipv4_dst)
		return 0;
	else
		return rounddown(offsetof(struct sw_flow_key, phy),
					  sizeof(long));
}

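/* Compares bytes [key_start, key_end) of two keys one long at a time,
 * OR-ing the differences together so the loop has no early exit.
 */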
static bool cmp_key(const struct sw_flow_key *key1,
		    const struct sw_flow_key *key2,
		    int key_start, int key_end)
{
	const long *cp1 = (const long *)((const u8 *)key1 + key_start);
	const long *cp2 = (const long *)((const u8 *)key2 + key_start);
	long diffs = 0;
	int i;

	for (i = key_start; i < key_end; i += sizeof(long))
		diffs |= *cp1++ ^ *cp2++;

	return diffs == 0;
}

static bool flow_cmp_masked_key(const struct sw_flow *flow,
				const struct sw_flow_key *key,
				const struct sw_flow_key_range *range)
{
	return cmp_key(&flow->key, key, range->start, range->end);
}

static bool ovs_flow_cmp_unmasked_key(const struct sw_flow *flow,
				      const struct sw_flow_match *match)
{
	struct sw_flow_key *key = match->key;
	int key_start = flow_key_start(key);
	int key_end = match->range.end;

	BUG_ON(ovs_identifier_is_ufid(&flow->id));
	return cmp_key(flow->id.unmasked_key, key, key_start, key_end);
}

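/* Looks up the flow matching 'unmasked' under 'mask': applies the mask,
 * hashes the result and scans the corresponding bucket for a flow with
 * the same mask, hash and masked key.
 */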
static struct sw_flow *masked_flow_lookup(struct table_instance *ti,
					  const struct sw_flow_key *unmasked,
					  const struct sw_flow_mask *mask)
{
	struct sw_flow *flow;
	struct hlist_head *head;
	u32 hash;
	struct sw_flow_key masked_key;

	ovs_flow_mask_key(&masked_key, unmasked, false, mask);
	hash = flow_hash(&masked_key, &mask->range);
	head = find_bucket(ti, hash);
	hlist_for_each_entry_rcu(flow, head, flow_table.node[ti->node_ver]) {
		if (flow->mask == mask && flow->flow_table.hash == hash &&
		    flow_cmp_masked_key(flow, &masked_key, &mask->range))
			return flow;
	}
	return NULL;
}

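/* Looks up 'key' by trying each mask in the mask list in turn.
 * '*n_mask_hit' is set to the number of masks tried, which callers use
 * for mask-hit statistics.
 */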
struct sw_flow *ovs_flow_tbl_lookup_stats(struct flow_table *tbl,
				    const struct sw_flow_key *key,
				    u32 *n_mask_hit)
{
	struct table_instance *ti = rcu_dereference_ovsl(tbl->ti);
	struct sw_flow_mask *mask;
	struct sw_flow *flow;

	*n_mask_hit = 0;
	list_for_each_entry_rcu(mask, &tbl->mask_list, list) {
		(*n_mask_hit)++;
		flow = masked_flow_lookup(ti, key, mask);
		if (flow)  /* Found */
			return flow;
	}
	return NULL;
}

struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *tbl,
				    const struct sw_flow_key *key)
{
	u32 __always_unused n_mask_hit;

	return ovs_flow_tbl_lookup_stats(tbl, key, &n_mask_hit);
}

struct sw_flow *ovs_flow_tbl_lookup_exact(struct flow_table *tbl,
					  const struct sw_flow_match *match)
{
	struct table_instance *ti = rcu_dereference_ovsl(tbl->ti);
	struct sw_flow_mask *mask;
	struct sw_flow *flow;

	/* Always called under ovs-mutex. */
	list_for_each_entry(mask, &tbl->mask_list, list) {
		flow = masked_flow_lookup(ti, match->key, mask);
		if (flow && ovs_identifier_is_key(&flow->id) &&
		    ovs_flow_cmp_unmasked_key(flow, match))
			return flow;
	}
	return NULL;
}

static u32 ufid_hash(const struct sw_flow_id *sfid)
{
	return jhash(sfid->ufid, sfid->ufid_len, 0);
}

static bool ovs_flow_cmp_ufid(const struct sw_flow *flow,
			      const struct sw_flow_id *sfid)
{
	if (flow->id.ufid_len != sfid->ufid_len)
		return false;

	return !memcmp(flow->id.ufid, sfid->ufid, sfid->ufid_len);
}

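/* Compares 'flow' against 'match': UFID-identified flows are compared on
 * the masked key, key-identified flows on the full unmasked key.
 */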
bool ovs_flow_cmp(const struct sw_flow *flow, const struct sw_flow_match *match)
{
	if (ovs_identifier_is_ufid(&flow->id))
		return flow_cmp_masked_key(flow, match->key, &match->range);

	return ovs_flow_cmp_unmasked_key(flow, match);
}

struct sw_flow *ovs_flow_tbl_lookup_ufid(struct flow_table *tbl,
					 const struct sw_flow_id *ufid)
{
	struct table_instance *ti = rcu_dereference_ovsl(tbl->ufid_ti);
	struct sw_flow *flow;
	struct hlist_head *head;
	u32 hash;

	hash = ufid_hash(ufid);
	head = find_bucket(ti, hash);
	hlist_for_each_entry_rcu(flow, head, ufid_table.node[ti->node_ver]) {
		if (flow->ufid_table.hash == hash &&
		    ovs_flow_cmp_ufid(flow, ufid))
			return flow;
	}
	return NULL;
}

int ovs_flow_tbl_num_masks(const struct flow_table *table)
{
	struct sw_flow_mask *mask;
	int num = 0;

	list_for_each_entry(mask, &table->mask_list, list)
		num++;

	return num;
}

static struct table_instance *table_instance_expand(struct table_instance *ti,
						    bool ufid)
{
	return table_instance_rehash(ti, ti->n_buckets * 2, ufid);
}

/* Remove 'mask' from the mask list, if it is not needed any more. */
static void flow_mask_remove(struct flow_table *tbl, struct sw_flow_mask *mask)
{
	if (mask) {
		/* ovs-lock is required to protect mask-refcount and
		 * mask list.
		 */
		ASSERT_OVSL();
		BUG_ON(!mask->ref_count);
		mask->ref_count--;

		if (!mask->ref_count) {
			list_del_rcu(&mask->list);
			kfree_rcu(mask, rcu);
		}
	}
}

/* Must be called with OVS mutex held. */
void ovs_flow_tbl_remove(struct flow_table *table, struct sw_flow *flow)
{
	struct table_instance *ti = ovsl_dereference(table->ti);
	struct table_instance *ufid_ti = ovsl_dereference(table->ufid_ti);

	BUG_ON(table->count == 0);
	hlist_del_rcu(&flow->flow_table.node[ti->node_ver]);
	table->count--;
	if (ovs_identifier_is_ufid(&flow->id)) {
		hlist_del_rcu(&flow->ufid_table.node[ufid_ti->node_ver]);
		table->ufid_count--;
	}

	/* RCU delete the mask. 'flow->mask' is not NULLed, as it should be
	 * accessible as long as the RCU read lock is held.
	 */
	flow_mask_remove(table, flow->mask);
}

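/* Allocates a new mask with a reference count of one; returns NULL if
 * allocation fails.
 */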
static struct sw_flow_mask *mask_alloc(void)
{
	struct sw_flow_mask *mask;

	mask = kmalloc(sizeof(*mask), GFP_KERNEL);
	if (mask)
		mask->ref_count = 1;

	return mask;
}

static bool mask_equal(const struct sw_flow_mask *a,
		       const struct sw_flow_mask *b)
{
	const u8 *a_ = (const u8 *)&a->key + a->range.start;
	const u8 *b_ = (const u8 *)&b->key + b->range.start;

	return (a->range.end == b->range.end)
		&& (a->range.start == b->range.start)
		&& (memcmp(a_, b_, range_n_bytes(&a->range)) == 0);
}

static struct sw_flow_mask *flow_mask_find(const struct flow_table *tbl,
					   const struct sw_flow_mask *mask)
{
	struct list_head *ml;

	list_for_each(ml, &tbl->mask_list) {
		struct sw_flow_mask *m;

		m = container_of(ml, struct sw_flow_mask, list);
		if (mask_equal(mask, m))
			return m;
	}

	return NULL;
}

/* Add 'mask' into the mask list, if it is not already there. */
static int flow_mask_insert(struct flow_table *tbl, struct sw_flow *flow,
			    const struct sw_flow_mask *new)
{
	struct sw_flow_mask *mask;

	mask = flow_mask_find(tbl, new);
	if (!mask) {
		/* Allocate a new mask if none exists. */
		mask = mask_alloc();
		if (!mask)
			return -ENOMEM;
		mask->key = new->key;
		mask->range = new->range;
		list_add_rcu(&mask->list, &tbl->mask_list);
	} else {
		BUG_ON(!mask->ref_count);
		mask->ref_count++;
	}

	flow->mask = mask;
	return 0;
}

/* Must be called with OVS mutex held. */
static void flow_key_insert(struct flow_table *table, struct sw_flow *flow)
{
	struct table_instance *new_ti = NULL;
	struct table_instance *ti;

	flow->flow_table.hash = flow_hash(&flow->key, &flow->mask->range);
	ti = ovsl_dereference(table->ti);
	table_instance_insert(ti, flow);
	table->count++;

	/* Expand table, if necessary, to make room. */
	if (table->count > ti->n_buckets)
		new_ti = table_instance_expand(ti, false);
	else if (time_after(jiffies, table->last_rehash + REHASH_INTERVAL))
		new_ti = table_instance_rehash(ti, ti->n_buckets, false);

	if (new_ti) {
		rcu_assign_pointer(table->ti, new_ti);
		call_rcu(&ti->rcu, flow_tbl_destroy_rcu_cb);
		table->last_rehash = jiffies;
	}
}

/* Must be called with OVS mutex held. */
static void flow_ufid_insert(struct flow_table *table, struct sw_flow *flow)
{
	struct table_instance *ti;

	flow->ufid_table.hash = ufid_hash(&flow->id);
	ti = ovsl_dereference(table->ufid_ti);
	ufid_table_instance_insert(ti, flow);
	table->ufid_count++;

	/* Expand table, if necessary, to make room. */
	if (table->ufid_count > ti->n_buckets) {
		struct table_instance *new_ti;

		new_ti = table_instance_expand(ti, true);
		if (new_ti) {
			rcu_assign_pointer(table->ufid_ti, new_ti);
			call_rcu(&ti->rcu, flow_tbl_destroy_rcu_cb);
		}
	}
}

/* Must be called with OVS mutex held. */
int ovs_flow_tbl_insert(struct flow_table *table, struct sw_flow *flow,
			const struct sw_flow_mask *mask)
{
	int err;

	err = flow_mask_insert(table, flow, mask);
	if (err)
		return err;
	flow_key_insert(table, flow);
	if (ovs_identifier_is_ufid(&flow->id))
		flow_ufid_insert(table, flow);

	return 0;
}

/* Initializes the flow module.
 * Returns zero if successful or a negative error code.
 */
int ovs_flow_init(void)
{
	BUILD_BUG_ON(__alignof__(struct sw_flow_key) % __alignof__(long));
	BUILD_BUG_ON(sizeof(struct sw_flow_key) % sizeof(long));

	flow_cache = kmem_cache_create("sw_flow", sizeof(struct sw_flow)
				       + (num_possible_nodes()
					  * sizeof(struct flow_stats *)),
				       0, 0, NULL);
	if (flow_cache == NULL)
		return -ENOMEM;

	flow_stats_cache
		= kmem_cache_create("sw_flow_stats", sizeof(struct flow_stats),
				    0, SLAB_HWCACHE_ALIGN, NULL);
	if (flow_stats_cache == NULL) {
		kmem_cache_destroy(flow_cache);
		flow_cache = NULL;
		return -ENOMEM;
	}

	return 0;
}

/* Uninitializes the flow module. */
void ovs_flow_exit(void)
{
	kmem_cache_destroy(flow_stats_cache);
	kmem_cache_destroy(flow_cache);
}