#include <linux/if.h>
#include <linux/if_ether.h>
#include <linux/if_link.h>
#include <linux/netdevice.h>
#include <linux/in.h>
#include <linux/types.h>
#include <linux/skbuff.h>
#include <net/flow_dissector.h>
#include "enic_res.h"
#include "enic_clsf.h"

/* enic_addfltr_5t - Add ipv4 5tuple filter
 *	@enic: enic struct of vnic
 *	@keys: flow_keys of ipv4 5tuple
 *	@rq: rq number to steer to
 *
 * This function returns filter_id (hardware_id) of the filter
 * added. In case of error it returns a negative number.
 */
int enic_addfltr_5t(struct enic *enic, struct flow_keys *keys, u16 rq)
{
	int res;
	struct filter data;

	switch (keys->basic.ip_proto) {
	case IPPROTO_TCP:
		data.u.ipv4.protocol = PROTO_TCP;
		break;
	case IPPROTO_UDP:
		data.u.ipv4.protocol = PROTO_UDP;
		break;
	default:
		return -EPROTONOSUPPORT;
	}
	data.type = FILTER_IPV4_5TUPLE;
	data.u.ipv4.src_addr = ntohl(keys->addrs.v4addrs.src);
	data.u.ipv4.dst_addr = ntohl(keys->addrs.v4addrs.dst);
	data.u.ipv4.src_port = ntohs(keys->ports.src);
	data.u.ipv4.dst_port = ntohs(keys->ports.dst);
	data.u.ipv4.flags = FILTER_FIELDS_IPV4_5TUPLE;

	spin_lock_bh(&enic->devcmd_lock);
	res = vnic_dev_classifier(enic->vdev, CLSF_ADD, &rq, &data);
	spin_unlock_bh(&enic->devcmd_lock);
	res = (res == 0) ? rq : res;

	return res;
}

/* enic_delfltr - Delete clsf filter
 *	@enic: enic struct of vnic
 *	@filter_id: filter_id (hardware_id) of filter to be deleted
 *
 * This function returns zero in case of success, negative number in case of
 * error.
 */
int enic_delfltr(struct enic *enic, u16 filter_id)
{
	int ret;

	spin_lock_bh(&enic->devcmd_lock);
	ret = vnic_dev_classifier(enic->vdev, CLSF_DEL, &filter_id, NULL);
	spin_unlock_bh(&enic->devcmd_lock);

	return ret;
}

/* enic_rfs_flw_tbl_init - initialize enic->rfs_h members
 *	@enic: enic data
 */
void enic_rfs_flw_tbl_init(struct enic *enic)
{
	int i;

	spin_lock_init(&enic->rfs_h.lock);
	for (i = 0; i <= ENIC_RFS_FLW_MASK; i++)
		INIT_HLIST_HEAD(&enic->rfs_h.ht_head[i]);
	enic->rfs_h.max = enic->config.num_arfs;
	enic->rfs_h.free = enic->rfs_h.max;
	enic->rfs_h.toclean = 0;
	enic_rfs_timer_start(enic);
}

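/* enic_rfs_flw_tbl_free - free the rfs flow table
 *	@enic: enic data
 *
 * Stops the expiry timer, then walks every hash bucket, deletes each
 * filter from the hardware and frees the corresponding node.
 */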
void enic_rfs_flw_tbl_free(struct enic *enic)
{
	int i;

	enic_rfs_timer_stop(enic);
	spin_lock_bh(&enic->rfs_h.lock);
	enic->rfs_h.free = 0;
	for (i = 0; i < (1 << ENIC_RFS_FLW_BITSHIFT); i++) {
		struct hlist_head *hhead;
		struct hlist_node *tmp;
		struct enic_rfs_fltr_node *n;

		hhead = &enic->rfs_h.ht_head[i];
		hlist_for_each_entry_safe(n, tmp, hhead, node) {
			enic_delfltr(enic, n->fltr_id);
			hlist_del(&n->node);
			kfree(n);
		}
	}
	spin_unlock_bh(&enic->rfs_h.lock);
}

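/* htbl_fltr_search - look up a flow table node by its filter_id
 *	@enic: enic data
 *	@fltr_id: filter_id (hardware_id) to search for
 *
 * Walks all hash buckets and returns the node whose filter matches
 * fltr_id, or NULL if no such filter is in the table.
 */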
struct enic_rfs_fltr_node *htbl_fltr_search(struct enic *enic, u16 fltr_id)
{
	int i;

	for (i = 0; i < (1 << ENIC_RFS_FLW_BITSHIFT); i++) {
		struct hlist_head *hhead;
		struct hlist_node *tmp;
		struct enic_rfs_fltr_node *n;

		hhead = &enic->rfs_h.ht_head[i];
		hlist_for_each_entry_safe(n, tmp, hhead, node)
			if (n->fltr_id == fltr_id)
				return n;
	}

	return NULL;
}

#ifdef CONFIG_RFS_ACCEL
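/* enic_flow_may_expire - timer callback that ages out idle flows
 *	@data: the enic, cast to unsigned long when the timer was set up
 *
 * Scans ENIC_CLSF_EXPIRE_COUNT hash buckets per run, asks the rps layer
 * whether each flow may expire and, if so, deletes its filter from the
 * hardware and frees the node. Re-arms itself to run again in HZ/4 jiffies.
 */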
void enic_flow_may_expire(unsigned long data)
{
	struct enic *enic = (struct enic *)data;
	bool res;
	int j;

	spin_lock_bh(&enic->rfs_h.lock);
	for (j = 0; j < ENIC_CLSF_EXPIRE_COUNT; j++) {
		struct hlist_head *hhead;
		struct hlist_node *tmp;
		struct enic_rfs_fltr_node *n;

		hhead = &enic->rfs_h.ht_head[enic->rfs_h.toclean++];
		hlist_for_each_entry_safe(n, tmp, hhead, node) {
			res = rps_may_expire_flow(enic->netdev, n->rq_id,
						  n->flow_id, n->fltr_id);
			if (res) {
				res = enic_delfltr(enic, n->fltr_id);
				if (unlikely(res))
					continue;
				hlist_del(&n->node);
				kfree(n);
				enic->rfs_h.free++;
			}
		}
	}
	spin_unlock_bh(&enic->rfs_h.lock);
	mod_timer(&enic->rfs_h.rfs_may_expire, jiffies + HZ/4);
}

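/* htbl_key_search - find a flow table node in a bucket by its flow keys
 *	@h: hash bucket head to search
 *	@k: flow_keys to match (ipv4 addresses, ports and protocols)
 */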
static struct enic_rfs_fltr_node *htbl_key_search(struct hlist_head *h,
						  struct flow_keys *k)
{
	struct enic_rfs_fltr_node *tpos;

	hlist_for_each_entry(tpos, h, node)
		if (tpos->keys.addrs.v4addrs.src == k->addrs.v4addrs.src &&
		    tpos->keys.addrs.v4addrs.dst == k->addrs.v4addrs.dst &&
		    tpos->keys.ports.ports == k->ports.ports &&
		    tpos->keys.basic.ip_proto == k->basic.ip_proto &&
		    tpos->keys.basic.n_proto == k->basic.n_proto)
			return tpos;
	return NULL;
}

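/* enic_rx_flow_steer - steer a flow to the desired receive queue
 *	@dev: net device
 *	@skb: packet sample from the flow being steered
 *	@rxq_index: receive queue the flow should be steered to
 *	@flow_id: flow id assigned by the rps layer
 *
 * Dissects the skb, adds (or moves) an ipv4 5tuple filter for the flow
 * and records it in the rfs flow table. Returns the filter_id
 * (hardware_id) of the filter on success, negative number in case of
 * error.
 */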
int enic_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
		       u16 rxq_index, u32 flow_id)
{
	struct flow_keys keys;
	struct enic_rfs_fltr_node *n;
	struct enic *enic;
	u16 tbl_idx;
	int res, i;

	enic = netdev_priv(dev);
	res = skb_flow_dissect_flow_keys(skb, &keys, 0);
	if (!res || keys.basic.n_proto != htons(ETH_P_IP) ||
	    (keys.basic.ip_proto != IPPROTO_TCP &&
	     keys.basic.ip_proto != IPPROTO_UDP))
		return -EPROTONOSUPPORT;

	tbl_idx = skb_get_hash_raw(skb) & ENIC_RFS_FLW_MASK;
	spin_lock_bh(&enic->rfs_h.lock);
	n = htbl_key_search(&enic->rfs_h.ht_head[tbl_idx], &keys);

	if (n) { /* entry already present */
		if (rxq_index == n->rq_id) {
			res = -EEXIST;
			goto ret_unlock;
		}

		/* The desired rq changed for this flow, so we need to delete
		 * the old fltr and add a new one.
		 *
		 * The moment we delete the fltr, upcoming pkts are put in the
		 * default rq based on rss. When we add the new filter,
		 * upcoming pkts are put in the desired queue. This could cause
		 * ooo pkts.
		 *
		 * Let's first try adding the new fltr and then delete the old
		 * one.
		 */
		i = --enic->rfs_h.free;
		/* clsf tbl is full, we have to del the old fltr first */
		if (unlikely(i < 0)) {
			enic->rfs_h.free++;
			res = enic_delfltr(enic, n->fltr_id);
			if (unlikely(res < 0))
				goto ret_unlock;
			res = enic_addfltr_5t(enic, &keys, rxq_index);
			if (res < 0) {
				hlist_del(&n->node);
				enic->rfs_h.free++;
				goto ret_unlock;
			}
		/* add the new fltr first, then del the old fltr */
		} else {
			int ret;

			res = enic_addfltr_5t(enic, &keys, rxq_index);
			if (res < 0) {
				enic->rfs_h.free++;
				goto ret_unlock;
			}
			ret = enic_delfltr(enic, n->fltr_id);
			/* deleting old fltr failed. Add old fltr to list.
			 * enic_flow_may_expire() will try to delete it later.
			 */
			if (unlikely(ret < 0)) {
				struct enic_rfs_fltr_node *d;
				struct hlist_head *head;

				head = &enic->rfs_h.ht_head[tbl_idx];
				d = kmalloc(sizeof(*d), GFP_ATOMIC);
				if (d) {
					d->fltr_id = n->fltr_id;
					INIT_HLIST_NODE(&d->node);
					hlist_add_head(&d->node, head);
				}
			} else {
				enic->rfs_h.free++;
			}
		}
		n->rq_id = rxq_index;
		n->fltr_id = res;
		n->flow_id = flow_id;
	/* entry not present */
	} else {
		i = --enic->rfs_h.free;
		if (i <= 0) {
			enic->rfs_h.free++;
			res = -EBUSY;
			goto ret_unlock;
		}

		n = kmalloc(sizeof(*n), GFP_ATOMIC);
		if (!n) {
			res = -ENOMEM;
			enic->rfs_h.free++;
			goto ret_unlock;
		}

		res = enic_addfltr_5t(enic, &keys, rxq_index);
		if (res < 0) {
			kfree(n);
			enic->rfs_h.free++;
			goto ret_unlock;
		}
		n->rq_id = rxq_index;
		n->fltr_id = res;
		n->flow_id = flow_id;
		n->keys = keys;
		INIT_HLIST_NODE(&n->node);
		hlist_add_head(&n->node, &enic->rfs_h.ht_head[tbl_idx]);
	}

ret_unlock:
	spin_unlock_bh(&enic->rfs_h.lock);
	return res;
}

#endif /* CONFIG_RFS_ACCEL */
