/*
 * net/sched/cls_flower.c		Flower classifier
 *
 * Copyright (c) 2015 Jiri Pirko <jiri@resnulli.us>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/rhashtable.h>

#include <linux/if_ether.h>
#include <linux/in6.h>
#include <linux/ip.h>

#include <net/sch_generic.h>
#include <net/pkt_cls.h>
#include <net/ip.h>
#include <net/flow_dissector.h>

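/* Flow key used both for matching and as the hashtable lookup key. The
 * IPv4 and IPv6 address keys share storage via the union;
 * control.addr_type records which one is in use.
 */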
struct fl_flow_key {
	int	indev_ifindex;
	struct flow_dissector_key_control control;
	struct flow_dissector_key_basic basic;
	struct flow_dissector_key_eth_addrs eth;
	struct flow_dissector_key_addrs ipaddrs;
	union {
		struct flow_dissector_key_ipv4_addrs ipv4;
		struct flow_dissector_key_ipv6_addrs ipv6;
	};
	struct flow_dissector_key_ports tp;
} __aligned(BITS_PER_LONG / 8); /* Ensure that we can do comparisons as longs. */

struct fl_flow_mask_range {
	unsigned short int start;
	unsigned short int end;
};

struct fl_flow_mask {
	struct fl_flow_key key;
	struct fl_flow_mask_range range;
	struct rcu_head	rcu;
};

struct cls_fl_head {
	struct rhashtable ht;
	struct fl_flow_mask mask;
	struct flow_dissector dissector;
	u32 hgen;
	bool mask_assigned;
	struct list_head filters;
	struct rhashtable_params ht_params;
	struct rcu_head rcu;
};

struct cls_fl_filter {
	struct rhash_head ht_node;
	struct fl_flow_key mkey;
	struct tcf_exts exts;
	struct tcf_result res;
	struct fl_flow_key key;
	struct list_head list;
	u32 handle;
	struct rcu_head	rcu;
};

static unsigned short int fl_mask_range(const struct fl_flow_mask *mask)
{
	return mask->range.end - mask->range.start;
}

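/* Find the first and last non-zero bytes of the mask and record the
 * enclosing range, rounded out to long boundaries so that masked-key
 * construction and comparison can operate on whole longs.
 */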
static void fl_mask_update_range(struct fl_flow_mask *mask)
{
	const u8 *bytes = (const u8 *) &mask->key;
	size_t size = sizeof(mask->key);
	size_t i, first = 0, last = size - 1;

	for (i = 0; i < sizeof(mask->key); i++) {
		if (bytes[i]) {
			if (!first && i)
				first = i;
			last = i;
		}
	}
	mask->range.start = rounddown(first, sizeof(long));
	mask->range.end = roundup(last + 1, sizeof(long));
}

static void *fl_key_get_start(struct fl_flow_key *key,
			      const struct fl_flow_mask *mask)
{
	return (u8 *) key + mask->range.start;
}

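/* Build the masked lookup key: AND the relevant range of the flow key
 * with the mask, one long at a time.
 */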
static void fl_set_masked_key(struct fl_flow_key *mkey, struct fl_flow_key *key,
			      struct fl_flow_mask *mask)
{
	const long *lkey = fl_key_get_start(key, mask);
	const long *lmask = fl_key_get_start(&mask->key, mask);
	long *lmkey = fl_key_get_start(mkey, mask);
	int i;

	for (i = 0; i < fl_mask_range(mask); i += sizeof(long))
		*lmkey++ = *lkey++ & *lmask++;
}

static void fl_clear_masked_range(struct fl_flow_key *key,
				  struct fl_flow_mask *mask)
{
	memset(fl_key_get_start(key, mask), 0, fl_mask_range(mask));
}

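/* Fast path: dissect the skb into a flow key, mask it, and look the
 * result up in the hashtable. A hit executes the filter's actions.
 */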
static int fl_classify(struct sk_buff *skb, const struct tcf_proto *tp,
		       struct tcf_result *res)
{
	struct cls_fl_head *head = rcu_dereference_bh(tp->root);
	struct cls_fl_filter *f;
	struct fl_flow_key skb_key;
	struct fl_flow_key skb_mkey;

	fl_clear_masked_range(&skb_key, &head->mask);
	skb_key.indev_ifindex = skb->skb_iif;
	/* skb_flow_dissect() does not set n_proto in case of an unknown
	 * protocol, so set it here.
	 */
	skb_key.basic.n_proto = skb->protocol;
	skb_flow_dissect(skb, &head->dissector, &skb_key, 0);

	fl_set_masked_key(&skb_mkey, &skb_key, &head->mask);

	f = rhashtable_lookup_fast(&head->ht,
				   fl_key_get_start(&skb_mkey, &head->mask),
				   head->ht_params);
	if (f) {
		*res = f->res;
		return tcf_exts_exec(skb, &f->exts, res);
	}
	return -1;
}

static int fl_init(struct tcf_proto *tp)
{
	struct cls_fl_head *head;

	head = kzalloc(sizeof(*head), GFP_KERNEL);
	if (!head)
		return -ENOBUFS;

	INIT_LIST_HEAD_RCU(&head->filters);
	rcu_assign_pointer(tp->root, head);

	return 0;
}

static void fl_destroy_filter(struct rcu_head *head)
{
	struct cls_fl_filter *f = container_of(head, struct cls_fl_filter, rcu);

	tcf_exts_destroy(&f->exts);
	kfree(f);
}

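/* Tear down the classifier instance. Unless forced, refuse while
 * filters are still installed; actual freeing is deferred via RCU.
 */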
static bool fl_destroy(struct tcf_proto *tp, bool force)
{
	struct cls_fl_head *head = rtnl_dereference(tp->root);
	struct cls_fl_filter *f, *next;

	if (!force && !list_empty(&head->filters))
		return false;

	list_for_each_entry_safe(f, next, &head->filters, list) {
		list_del_rcu(&f->list);
		call_rcu(&f->rcu, fl_destroy_filter);
	}
	RCU_INIT_POINTER(tp->root, NULL);
	if (head->mask_assigned)
		rhashtable_destroy(&head->ht);
	kfree_rcu(head, rcu);
	return true;
}

static unsigned long fl_get(struct tcf_proto *tp, u32 handle)
{
	struct cls_fl_head *head = rtnl_dereference(tp->root);
	struct cls_fl_filter *f;

	list_for_each_entry(f, &head->filters, list)
		if (f->handle == handle)
			return (unsigned long) f;
	return 0;
}

static const struct nla_policy fl_policy[TCA_FLOWER_MAX + 1] = {
	[TCA_FLOWER_UNSPEC]		= { .type = NLA_UNSPEC },
	[TCA_FLOWER_CLASSID]		= { .type = NLA_U32 },
	[TCA_FLOWER_INDEV]		= { .type = NLA_STRING,
					    .len = IFNAMSIZ },
	[TCA_FLOWER_KEY_ETH_DST]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ETH_DST_MASK]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ETH_SRC]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ETH_SRC_MASK]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ETH_TYPE]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_IP_PROTO]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_IPV4_SRC]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_IPV4_SRC_MASK]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_IPV4_DST]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_IPV4_DST_MASK]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_IPV6_SRC]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_IPV6_SRC_MASK]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_IPV6_DST]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_IPV6_DST_MASK]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_TCP_SRC]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_TCP_DST]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_UDP_SRC]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_UDP_DST]	= { .type = NLA_U16 },
};

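/* Copy a key value and its mask from netlink attributes. If no mask
 * attribute was supplied (or none is defined for this key), the match
 * is exact: the mask is set to all ones.
 */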
static void fl_set_key_val(struct nlattr **tb,
			   void *val, int val_type,
			   void *mask, int mask_type, int len)
{
	if (!tb[val_type])
		return;
	memcpy(val, nla_data(tb[val_type]), len);
	if (mask_type == TCA_FLOWER_UNSPEC || !tb[mask_type])
		memset(mask, 0xff, len);
	else
		memcpy(mask, nla_data(tb[mask_type]), len);
}

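/* Populate key and mask from the TCA_FLOWER_* attributes. L3 and L4
 * keys are parsed only when the corresponding protocol fields select
 * them (EtherType for ip_proto, ip_proto for the TCP/UDP ports).
 */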
static int fl_set_key(struct net *net, struct nlattr **tb,
		      struct fl_flow_key *key, struct fl_flow_key *mask)
{
#ifdef CONFIG_NET_CLS_IND
	if (tb[TCA_FLOWER_INDEV]) {
		int err = tcf_change_indev(net, tb[TCA_FLOWER_INDEV]);

		if (err < 0)
			return err;
		key->indev_ifindex = err;
		mask->indev_ifindex = 0xffffffff;
	}
#endif

	fl_set_key_val(tb, key->eth.dst, TCA_FLOWER_KEY_ETH_DST,
		       mask->eth.dst, TCA_FLOWER_KEY_ETH_DST_MASK,
		       sizeof(key->eth.dst));
	fl_set_key_val(tb, key->eth.src, TCA_FLOWER_KEY_ETH_SRC,
		       mask->eth.src, TCA_FLOWER_KEY_ETH_SRC_MASK,
		       sizeof(key->eth.src));

	fl_set_key_val(tb, &key->basic.n_proto, TCA_FLOWER_KEY_ETH_TYPE,
		       &mask->basic.n_proto, TCA_FLOWER_UNSPEC,
		       sizeof(key->basic.n_proto));

	if (key->basic.n_proto == htons(ETH_P_IP) ||
	    key->basic.n_proto == htons(ETH_P_IPV6)) {
		fl_set_key_val(tb, &key->basic.ip_proto, TCA_FLOWER_KEY_IP_PROTO,
			       &mask->basic.ip_proto, TCA_FLOWER_UNSPEC,
			       sizeof(key->basic.ip_proto));
	}

	if (tb[TCA_FLOWER_KEY_IPV4_SRC] || tb[TCA_FLOWER_KEY_IPV4_DST]) {
		key->control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
		fl_set_key_val(tb, &key->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC,
			       &mask->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC_MASK,
			       sizeof(key->ipv4.src));
		fl_set_key_val(tb, &key->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST,
			       &mask->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST_MASK,
			       sizeof(key->ipv4.dst));
	} else if (tb[TCA_FLOWER_KEY_IPV6_SRC] || tb[TCA_FLOWER_KEY_IPV6_DST]) {
		key->control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
		fl_set_key_val(tb, &key->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC,
			       &mask->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC_MASK,
			       sizeof(key->ipv6.src));
		fl_set_key_val(tb, &key->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST,
			       &mask->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST_MASK,
			       sizeof(key->ipv6.dst));
	}

	if (key->basic.ip_proto == IPPROTO_TCP) {
		fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_TCP_SRC,
			       &mask->tp.src, TCA_FLOWER_UNSPEC,
			       sizeof(key->tp.src));
		fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_TCP_DST,
			       &mask->tp.dst, TCA_FLOWER_UNSPEC,
			       sizeof(key->tp.dst));
	} else if (key->basic.ip_proto == IPPROTO_UDP) {
		fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_UDP_SRC,
			       &mask->tp.src, TCA_FLOWER_UNSPEC,
			       sizeof(key->tp.src));
		fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_UDP_DST,
			       &mask->tp.dst, TCA_FLOWER_UNSPEC,
			       sizeof(key->tp.dst));
	}

	return 0;
}

static bool fl_mask_eq(struct fl_flow_mask *mask1,
		       struct fl_flow_mask *mask2)
{
	const long *lmask1 = fl_key_get_start(&mask1->key, mask1);
	const long *lmask2 = fl_key_get_start(&mask2->key, mask2);

	return !memcmp(&mask1->range, &mask2->range, sizeof(mask1->range)) &&
	       !memcmp(lmask1, lmask2, fl_mask_range(mask1));
}

static const struct rhashtable_params fl_ht_params = {
	.key_offset = offsetof(struct cls_fl_filter, mkey), /* base offset */
	.head_offset = offsetof(struct cls_fl_filter, ht_node),
	.automatic_shrinking = true,
};

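/* The hashtable hashes only the bytes of the masked key that fall
 * within the mask's non-zero range, so shift the key offset and length
 * accordingly.
 */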
static int fl_init_hashtable(struct cls_fl_head *head,
			     struct fl_flow_mask *mask)
{
	head->ht_params = fl_ht_params;
	head->ht_params.key_len = fl_mask_range(mask);
	head->ht_params.key_offset += mask->range.start;

	return rhashtable_init(&head->ht, &head->ht_params);
}

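/* Helpers to build the flow dissector key list. A dissector key is
 * included only if its struct member overlaps the mask's non-zero
 * range, so the dissector extracts no more than the filters can match.
 */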
#define FL_KEY_MEMBER_OFFSET(member) offsetof(struct fl_flow_key, member)
#define FL_KEY_MEMBER_SIZE(member) (sizeof(((struct fl_flow_key *) 0)->member))
#define FL_KEY_MEMBER_END_OFFSET(member)					\
	(FL_KEY_MEMBER_OFFSET(member) + FL_KEY_MEMBER_SIZE(member))

#define FL_KEY_IN_RANGE(mask, member)						\
	(FL_KEY_MEMBER_OFFSET(member) <= (mask)->range.end &&			\
	 FL_KEY_MEMBER_END_OFFSET(member) >= (mask)->range.start)

#define FL_KEY_SET(keys, cnt, id, member)					\
	do {									\
		keys[cnt].key_id = id;						\
		keys[cnt].offset = FL_KEY_MEMBER_OFFSET(member);		\
		cnt++;								\
	} while (0)

#define FL_KEY_SET_IF_IN_RANGE(mask, keys, cnt, id, member)			\
	do {									\
		if (FL_KEY_IN_RANGE(mask, member))				\
			FL_KEY_SET(keys, cnt, id, member);			\
	} while (0)

static void fl_init_dissector(struct cls_fl_head *head,
			      struct fl_flow_mask *mask)
{
	struct flow_dissector_key keys[FLOW_DISSECTOR_KEY_MAX];
	size_t cnt = 0;

	FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_CONTROL, control);
	FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_BASIC, basic);
	FL_KEY_SET_IF_IN_RANGE(mask, keys, cnt,
			       FLOW_DISSECTOR_KEY_ETH_ADDRS, eth);
	FL_KEY_SET_IF_IN_RANGE(mask, keys, cnt,
			       FLOW_DISSECTOR_KEY_IPV4_ADDRS, ipv4);
	FL_KEY_SET_IF_IN_RANGE(mask, keys, cnt,
			       FLOW_DISSECTOR_KEY_IPV6_ADDRS, ipv6);
	FL_KEY_SET_IF_IN_RANGE(mask, keys, cnt,
			       FLOW_DISSECTOR_KEY_PORTS, tp);

	skb_flow_dissector_init(&head->dissector, keys, cnt);
}

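/* All filters of one classifier instance share a single mask. The
 * first filter installs it (and sizes the hashtable and dissector to
 * match); later filters must use an identical mask.
 */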
static int fl_check_assign_mask(struct cls_fl_head *head,
				struct fl_flow_mask *mask)
{
	int err;

	if (head->mask_assigned) {
		if (!fl_mask_eq(&head->mask, mask))
			return -EINVAL;
		else
			return 0;
	}

	/* Mask is not assigned yet, so assign it and initialize the
	 * hashtable accordingly.
	 */
	err = fl_init_hashtable(head, mask);
	if (err)
		return err;
	memcpy(&head->mask, mask, sizeof(head->mask));
	head->mask_assigned = true;

	fl_init_dissector(head, mask);

	return 0;
}

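/* Parse and validate a filter's actions, classid and key from netlink
 * attributes, then precompute its masked key for hashtable insertion.
 */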
static int fl_set_parms(struct net *net, struct tcf_proto *tp,
			struct cls_fl_filter *f, struct fl_flow_mask *mask,
			unsigned long base, struct nlattr **tb,
			struct nlattr *est, bool ovr)
{
	struct tcf_exts e;
	int err;

	tcf_exts_init(&e, TCA_FLOWER_ACT, 0);
	err = tcf_exts_validate(net, tp, tb, est, &e, ovr);
	if (err < 0)
		return err;

	if (tb[TCA_FLOWER_CLASSID]) {
		f->res.classid = nla_get_u32(tb[TCA_FLOWER_CLASSID]);
		tcf_bind_filter(tp, &f->res, base);
	}

	err = fl_set_key(net, tb, &f->key, &mask->key);
	if (err)
		goto errout;

	fl_mask_update_range(mask);
	fl_set_masked_key(&f->mkey, &f->key, mask);

	tcf_exts_change(tp, &f->exts, &e);

	return 0;
errout:
	tcf_exts_destroy(&e);
	return err;
}

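/* Pick an unused handle in [1, 0x7FFFFFFE], scanning onward from the
 * last generated value; returns 0 if the space is exhausted.
 */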
static u32 fl_grab_new_handle(struct tcf_proto *tp,
			      struct cls_fl_head *head)
{
	unsigned int i = 0x80000000;
	u32 handle;

	do {
		if (++head->hgen == 0x7FFFFFFF)
			head->hgen = 1;
	} while (--i > 0 && fl_get(tp, head->hgen));

	if (unlikely(i == 0)) {
		pr_err("Insufficient number of handles\n");
		handle = 0;
	} else {
		handle = head->hgen;
	}

	return handle;
}

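/* Create a new filter or replace an existing one. The new filter is
 * inserted into the hashtable before the old one is removed, keeping
 * lookups hitting throughout the replacement.
 */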
static int fl_change(struct net *net, struct sk_buff *in_skb,
		     struct tcf_proto *tp, unsigned long base,
		     u32 handle, struct nlattr **tca,
		     unsigned long *arg, bool ovr)
{
	struct cls_fl_head *head = rtnl_dereference(tp->root);
	struct cls_fl_filter *fold = (struct cls_fl_filter *) *arg;
	struct cls_fl_filter *fnew;
	struct nlattr *tb[TCA_FLOWER_MAX + 1];
	struct fl_flow_mask mask = {};
	int err;

	if (!tca[TCA_OPTIONS])
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_FLOWER_MAX, tca[TCA_OPTIONS], fl_policy);
	if (err < 0)
		return err;

	if (fold && handle && fold->handle != handle)
		return -EINVAL;

	fnew = kzalloc(sizeof(*fnew), GFP_KERNEL);
	if (!fnew)
		return -ENOBUFS;

	tcf_exts_init(&fnew->exts, TCA_FLOWER_ACT, 0);

	if (!handle) {
		handle = fl_grab_new_handle(tp, head);
		if (!handle) {
			err = -EINVAL;
			goto errout;
		}
	}
	fnew->handle = handle;

	err = fl_set_parms(net, tp, fnew, &mask, base, tb, tca[TCA_RATE], ovr);
	if (err)
		goto errout;

	err = fl_check_assign_mask(head, &mask);
	if (err)
		goto errout;

	err = rhashtable_insert_fast(&head->ht, &fnew->ht_node,
				     head->ht_params);
	if (err)
		goto errout;
	if (fold)
		rhashtable_remove_fast(&head->ht, &fold->ht_node,
				       head->ht_params);

	*arg = (unsigned long) fnew;

	if (fold) {
		list_replace_rcu(&fold->list, &fnew->list);
		tcf_unbind_filter(tp, &fold->res);
		call_rcu(&fold->rcu, fl_destroy_filter);
	} else {
		list_add_tail_rcu(&fnew->list, &head->filters);
	}

	return 0;

errout:
	kfree(fnew);
	return err;
}

static int fl_delete(struct tcf_proto *tp, unsigned long arg)
{
	struct cls_fl_head *head = rtnl_dereference(tp->root);
	struct cls_fl_filter *f = (struct cls_fl_filter *) arg;

	rhashtable_remove_fast(&head->ht, &f->ht_node,
			       head->ht_params);
	list_del_rcu(&f->list);
	tcf_unbind_filter(tp, &f->res);
	call_rcu(&f->rcu, fl_destroy_filter);
	return 0;
}

static void fl_walk(struct tcf_proto *tp, struct tcf_walker *arg)
{
	struct cls_fl_head *head = rtnl_dereference(tp->root);
	struct cls_fl_filter *f;

	list_for_each_entry_rcu(f, &head->filters, list) {
		if (arg->count < arg->skip)
			goto skip;
		if (arg->fn(tp, (unsigned long) f, arg) < 0) {
			arg->stop = 1;
			break;
		}
skip:
		arg->count++;
	}
}

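/* Dump a key value and, where one is defined, its mask. Values whose
 * mask is all zeroes were never set and are skipped entirely.
 */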
static int fl_dump_key_val(struct sk_buff *skb,
			   void *val, int val_type,
			   void *mask, int mask_type, int len)
{
	int err;

	if (!memchr_inv(mask, 0, len))
		return 0;
	err = nla_put(skb, val_type, len, val);
	if (err)
		return err;
	if (mask_type != TCA_FLOWER_UNSPEC) {
		err = nla_put(skb, mask_type, len, mask);
		if (err)
			return err;
	}
	return 0;
}

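/* Dump the filter's handle, key and per-key masks, and its actions
 * into a netlink message.
 */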
static int fl_dump(struct net *net, struct tcf_proto *tp, unsigned long fh,
		   struct sk_buff *skb, struct tcmsg *t)
{
	struct cls_fl_head *head = rtnl_dereference(tp->root);
	struct cls_fl_filter *f = (struct cls_fl_filter *) fh;
	struct nlattr *nest;
	struct fl_flow_key *key, *mask;

	if (!f)
		return skb->len;

	t->tcm_handle = f->handle;

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (!nest)
		goto nla_put_failure;

	if (f->res.classid &&
	    nla_put_u32(skb, TCA_FLOWER_CLASSID, f->res.classid))
		goto nla_put_failure;

	key = &f->key;
	mask = &head->mask.key;

	if (mask->indev_ifindex) {
		struct net_device *dev;

		dev = __dev_get_by_index(net, key->indev_ifindex);
		if (dev && nla_put_string(skb, TCA_FLOWER_INDEV, dev->name))
			goto nla_put_failure;
	}

	if (fl_dump_key_val(skb, key->eth.dst, TCA_FLOWER_KEY_ETH_DST,
			    mask->eth.dst, TCA_FLOWER_KEY_ETH_DST_MASK,
			    sizeof(key->eth.dst)) ||
	    fl_dump_key_val(skb, key->eth.src, TCA_FLOWER_KEY_ETH_SRC,
			    mask->eth.src, TCA_FLOWER_KEY_ETH_SRC_MASK,
			    sizeof(key->eth.src)) ||
	    fl_dump_key_val(skb, &key->basic.n_proto, TCA_FLOWER_KEY_ETH_TYPE,
			    &mask->basic.n_proto, TCA_FLOWER_UNSPEC,
			    sizeof(key->basic.n_proto)))
		goto nla_put_failure;
	if ((key->basic.n_proto == htons(ETH_P_IP) ||
	     key->basic.n_proto == htons(ETH_P_IPV6)) &&
	    fl_dump_key_val(skb, &key->basic.ip_proto, TCA_FLOWER_KEY_IP_PROTO,
			    &mask->basic.ip_proto, TCA_FLOWER_UNSPEC,
			    sizeof(key->basic.ip_proto)))
		goto nla_put_failure;

	if (key->control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS &&
	    (fl_dump_key_val(skb, &key->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC,
			     &mask->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC_MASK,
			     sizeof(key->ipv4.src)) ||
	     fl_dump_key_val(skb, &key->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST,
			     &mask->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST_MASK,
			     sizeof(key->ipv4.dst))))
		goto nla_put_failure;
	else if (key->control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS &&
		 (fl_dump_key_val(skb, &key->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC,
				  &mask->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC_MASK,
				  sizeof(key->ipv6.src)) ||
		  fl_dump_key_val(skb, &key->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST,
				  &mask->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST_MASK,
				  sizeof(key->ipv6.dst))))
		goto nla_put_failure;

	if (key->basic.ip_proto == IPPROTO_TCP &&
	    (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_TCP_SRC,
			     &mask->tp.src, TCA_FLOWER_UNSPEC,
			     sizeof(key->tp.src)) ||
	     fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_TCP_DST,
			     &mask->tp.dst, TCA_FLOWER_UNSPEC,
			     sizeof(key->tp.dst))))
		goto nla_put_failure;
	else if (key->basic.ip_proto == IPPROTO_UDP &&
		 (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_UDP_SRC,
				  &mask->tp.src, TCA_FLOWER_UNSPEC,
				  sizeof(key->tp.src)) ||
		  fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_UDP_DST,
				  &mask->tp.dst, TCA_FLOWER_UNSPEC,
				  sizeof(key->tp.dst))))
		goto nla_put_failure;

	if (tcf_exts_dump(skb, &f->exts))
		goto nla_put_failure;

	nla_nest_end(skb, nest);

	if (tcf_exts_dump_stats(skb, &f->exts) < 0)
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static struct tcf_proto_ops cls_fl_ops __read_mostly = {
	.kind		= "flower",
	.classify	= fl_classify,
	.init		= fl_init,
	.destroy	= fl_destroy,
	.get		= fl_get,
	.change		= fl_change,
	.delete		= fl_delete,
	.walk		= fl_walk,
	.dump		= fl_dump,
	.owner		= THIS_MODULE,
};

static int __init cls_fl_init(void)
{
	return register_tcf_proto_ops(&cls_fl_ops);
}

static void __exit cls_fl_exit(void)
{
	unregister_tcf_proto_ops(&cls_fl_ops);
}

module_init(cls_fl_init);
module_exit(cls_fl_exit);

MODULE_AUTHOR("Jiri Pirko <jiri@resnulli.us>");
MODULE_DESCRIPTION("Flower classifier");
MODULE_LICENSE("GPL v2");