/*
 * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/list.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <linux/mlx5/flow_table.h>
#include "en.h"

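/* How broadly an L2 steering rule matches: one exact DMAC, all multicast
 * DMACs, or all traffic (promiscuous).
 */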
enum {
	MLX5E_FULLMATCH = 0,
	MLX5E_ALLMULTI  = 1,
	MLX5E_PROMISC   = 2,
};

enum {
	MLX5E_UC        = 0,
	MLX5E_MC_IPV4   = 1,
	MLX5E_MC_IPV6   = 2,
	MLX5E_MC_OTHER  = 3,
};

enum {
	MLX5E_ACTION_NONE = 0,
	MLX5E_ACTION_ADD  = 1,
	MLX5E_ACTION_DEL  = 2,
};

struct mlx5e_eth_addr_hash_node {
	struct hlist_node          hlist;
	u8                         action;
	struct mlx5e_eth_addr_info ai;
};

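/* Hash a MAC address by its last byte; used as the index into the
 * netdev_uc/netdev_mc hash tables.
 */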
static inline int mlx5e_hash_eth_addr(u8 *addr)
{
	return addr[5];
}

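/* Add @addr to @hash if it is not already present.  Existing entries are
 * marked MLX5E_ACTION_NONE (keep as is), new ones MLX5E_ACTION_ADD so that
 * mlx5e_execute_action() installs their steering rules later.
 */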
static void mlx5e_add_eth_addr_to_hash(struct hlist_head *hash, u8 *addr)
{
	struct mlx5e_eth_addr_hash_node *hn;
	int ix = mlx5e_hash_eth_addr(addr);
	int found = 0;

	hlist_for_each_entry(hn, &hash[ix], hlist)
		if (ether_addr_equal_64bits(hn->ai.addr, addr)) {
			found = 1;
			break;
		}

	if (found) {
		hn->action = MLX5E_ACTION_NONE;
		return;
	}

	hn = kzalloc(sizeof(*hn), GFP_ATOMIC);
	if (!hn)
		return;

	ether_addr_copy(hn->ai.addr, addr);
	hn->action = MLX5E_ACTION_ADD;

	hlist_add_head(&hn->hlist, &hash[ix]);
}

static void mlx5e_del_eth_addr_from_hash(struct mlx5e_eth_addr_hash_node *hn)
{
	hlist_del(&hn->hlist);
	kfree(hn);
}

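/* Remove every main flow table entry that was installed for this address,
 * one per traffic type bit set in ai->tt_vec.
 */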
static void mlx5e_del_eth_addr_from_flow_table(struct mlx5e_priv *priv,
					       struct mlx5e_eth_addr_info *ai)
{
	void *ft = priv->ft.main;

	if (ai->tt_vec & BIT(MLX5E_TT_IPV6_IPSEC_ESP))
		mlx5_del_flow_table_entry(ft,
					  ai->ft_ix[MLX5E_TT_IPV6_IPSEC_ESP]);

	if (ai->tt_vec & BIT(MLX5E_TT_IPV4_IPSEC_ESP))
		mlx5_del_flow_table_entry(ft,
					  ai->ft_ix[MLX5E_TT_IPV4_IPSEC_ESP]);

	if (ai->tt_vec & BIT(MLX5E_TT_IPV6_IPSEC_AH))
		mlx5_del_flow_table_entry(ft,
					  ai->ft_ix[MLX5E_TT_IPV6_IPSEC_AH]);

	if (ai->tt_vec & BIT(MLX5E_TT_IPV4_IPSEC_AH))
		mlx5_del_flow_table_entry(ft,
					  ai->ft_ix[MLX5E_TT_IPV4_IPSEC_AH]);

	if (ai->tt_vec & BIT(MLX5E_TT_IPV6_TCP))
		mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV6_TCP]);

	if (ai->tt_vec & BIT(MLX5E_TT_IPV4_TCP))
		mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV4_TCP]);

	if (ai->tt_vec & BIT(MLX5E_TT_IPV6_UDP))
		mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV6_UDP]);

	if (ai->tt_vec & BIT(MLX5E_TT_IPV4_UDP))
		mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV4_UDP]);

	if (ai->tt_vec & BIT(MLX5E_TT_IPV6))
		mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV6]);

	if (ai->tt_vec & BIT(MLX5E_TT_IPV4))
		mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV4]);

	if (ai->tt_vec & BIT(MLX5E_TT_ANY))
		mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_ANY]);
}

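/* Classify a MAC address as unicast, IPv4 multicast (01:00:5e with the top
 * bit of the fourth byte clear), IPv6 multicast (33:33 prefix) or other
 * multicast.
 */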
static int mlx5e_get_eth_addr_type(u8 *addr)
{
	if (is_unicast_ether_addr(addr))
		return MLX5E_UC;

	if ((addr[0] == 0x01) &&
	    (addr[1] == 0x00) &&
	    (addr[2] == 0x5e) &&
	   !(addr[3] &  0x80))
		return MLX5E_MC_IPV4;

	if ((addr[0] == 0x33) &&
	    (addr[1] == 0x33))
		return MLX5E_MC_IPV6;

	return MLX5E_MC_OTHER;
}

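/* Return the bitmask of traffic types (MLX5E_TT_*) for which flow table
 * entries should be installed, based on the rule type and, for full-match
 * rules, on the address class.
 */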
static u32 mlx5e_get_tt_vec(struct mlx5e_eth_addr_info *ai, int type)
{
	int eth_addr_type;
	u32 ret;

	switch (type) {
	case MLX5E_FULLMATCH:
		eth_addr_type = mlx5e_get_eth_addr_type(ai->addr);
		switch (eth_addr_type) {
		case MLX5E_UC:
			ret =
				BIT(MLX5E_TT_IPV4_TCP)       |
				BIT(MLX5E_TT_IPV6_TCP)       |
				BIT(MLX5E_TT_IPV4_UDP)       |
				BIT(MLX5E_TT_IPV6_UDP)       |
				BIT(MLX5E_TT_IPV4_IPSEC_AH)  |
				BIT(MLX5E_TT_IPV6_IPSEC_AH)  |
				BIT(MLX5E_TT_IPV4_IPSEC_ESP) |
				BIT(MLX5E_TT_IPV6_IPSEC_ESP) |
				BIT(MLX5E_TT_IPV4)           |
				BIT(MLX5E_TT_IPV6)           |
				BIT(MLX5E_TT_ANY)            |
				0;
			break;

		case MLX5E_MC_IPV4:
			ret =
				BIT(MLX5E_TT_IPV4_UDP)       |
				BIT(MLX5E_TT_IPV4)           |
				0;
			break;

		case MLX5E_MC_IPV6:
			ret =
				BIT(MLX5E_TT_IPV6_UDP)       |
				BIT(MLX5E_TT_IPV6)           |
				0;
			break;

		case MLX5E_MC_OTHER:
			ret =
				BIT(MLX5E_TT_ANY)            |
				0;
			break;
		}

		break;

	case MLX5E_ALLMULTI:
		ret =
			BIT(MLX5E_TT_IPV4_UDP) |
			BIT(MLX5E_TT_IPV6_UDP) |
			BIT(MLX5E_TT_IPV4)     |
			BIT(MLX5E_TT_IPV6)     |
			BIT(MLX5E_TT_ANY)      |
			0;
		break;

	default: /* MLX5E_PROMISC */
		ret =
			BIT(MLX5E_TT_IPV4_TCP)       |
			BIT(MLX5E_TT_IPV6_TCP)       |
			BIT(MLX5E_TT_IPV4_UDP)       |
			BIT(MLX5E_TT_IPV6_UDP)       |
			BIT(MLX5E_TT_IPV4_IPSEC_AH)  |
			BIT(MLX5E_TT_IPV6_IPSEC_AH)  |
			BIT(MLX5E_TT_IPV4_IPSEC_ESP) |
			BIT(MLX5E_TT_IPV6_IPSEC_ESP) |
			BIT(MLX5E_TT_IPV4)           |
			BIT(MLX5E_TT_IPV6)           |
			BIT(MLX5E_TT_ANY)            |
			0;
		break;
	}

	return ret;
}

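/* Install one main flow table entry per traffic type in the TT vector for
 * this address, each forwarding to the corresponding TIR.  On failure,
 * entries added so far are rolled back.
 */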
static int __mlx5e_add_eth_addr_rule(struct mlx5e_priv *priv,
				     struct mlx5e_eth_addr_info *ai, int type,
				     void *flow_context, void *match_criteria)
{
	u8 match_criteria_enable = 0;
	void *match_value;
	void *dest;
	u8   *dmac;
	u8   *match_criteria_dmac;
	void *ft   = priv->ft.main;
	u32  *tirn = priv->tirn;
	u32  *ft_ix;
	u32  tt_vec;
	int  err;

	match_value = MLX5_ADDR_OF(flow_context, flow_context, match_value);
	dmac = MLX5_ADDR_OF(fte_match_param, match_value,
			    outer_headers.dmac_47_16);
	match_criteria_dmac = MLX5_ADDR_OF(fte_match_param, match_criteria,
					   outer_headers.dmac_47_16);
	dest = MLX5_ADDR_OF(flow_context, flow_context, destination);

	MLX5_SET(flow_context, flow_context, action,
		 MLX5_FLOW_CONTEXT_ACTION_FWD_DEST);
	MLX5_SET(flow_context, flow_context, destination_list_size, 1);
	MLX5_SET(dest_format_struct, dest, destination_type,
		 MLX5_FLOW_CONTEXT_DEST_TYPE_TIR);

	switch (type) {
	case MLX5E_FULLMATCH:
		match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
		memset(match_criteria_dmac, 0xff, ETH_ALEN);
		ether_addr_copy(dmac, ai->addr);
		break;

	case MLX5E_ALLMULTI:
		match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
		match_criteria_dmac[0] = 0x01;
		dmac[0] = 0x01;
		break;

	case MLX5E_PROMISC:
		break;
	}

	tt_vec = mlx5e_get_tt_vec(ai, type);

	ft_ix = &ai->ft_ix[MLX5E_TT_ANY];
	if (tt_vec & BIT(MLX5E_TT_ANY)) {
		MLX5_SET(dest_format_struct, dest, destination_id,
			 tirn[MLX5E_TT_ANY]);
		err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
						match_criteria, flow_context,
						ft_ix);
		if (err)
			goto err_del_ai;

		ai->tt_vec |= BIT(MLX5E_TT_ANY);
	}

	match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	MLX5_SET_TO_ONES(fte_match_param, match_criteria,
			 outer_headers.ethertype);

	ft_ix = &ai->ft_ix[MLX5E_TT_IPV4];
	if (tt_vec & BIT(MLX5E_TT_IPV4)) {
		MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
			 ETH_P_IP);
		MLX5_SET(dest_format_struct, dest, destination_id,
			 tirn[MLX5E_TT_IPV4]);
		err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
						match_criteria, flow_context,
						ft_ix);
		if (err)
			goto err_del_ai;

		ai->tt_vec |= BIT(MLX5E_TT_IPV4);
	}

	ft_ix = &ai->ft_ix[MLX5E_TT_IPV6];
	if (tt_vec & BIT(MLX5E_TT_IPV6)) {
		MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
			 ETH_P_IPV6);
		MLX5_SET(dest_format_struct, dest, destination_id,
			 tirn[MLX5E_TT_IPV6]);
		err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
						match_criteria, flow_context,
						ft_ix);
		if (err)
			goto err_del_ai;

		ai->tt_vec |= BIT(MLX5E_TT_IPV6);
	}

	MLX5_SET_TO_ONES(fte_match_param, match_criteria,
			 outer_headers.ip_protocol);
	MLX5_SET(fte_match_param, match_value, outer_headers.ip_protocol,
		 IPPROTO_UDP);

	ft_ix = &ai->ft_ix[MLX5E_TT_IPV4_UDP];
	if (tt_vec & BIT(MLX5E_TT_IPV4_UDP)) {
		MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
			 ETH_P_IP);
		MLX5_SET(dest_format_struct, dest, destination_id,
			 tirn[MLX5E_TT_IPV4_UDP]);
		err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
						match_criteria, flow_context,
						ft_ix);
		if (err)
			goto err_del_ai;

		ai->tt_vec |= BIT(MLX5E_TT_IPV4_UDP);
	}

	ft_ix = &ai->ft_ix[MLX5E_TT_IPV6_UDP];
	if (tt_vec & BIT(MLX5E_TT_IPV6_UDP)) {
		MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
			 ETH_P_IPV6);
		MLX5_SET(dest_format_struct, dest, destination_id,
			 tirn[MLX5E_TT_IPV6_UDP]);
		err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
						match_criteria, flow_context,
						ft_ix);
		if (err)
			goto err_del_ai;

		ai->tt_vec |= BIT(MLX5E_TT_IPV6_UDP);
	}

	MLX5_SET(fte_match_param, match_value, outer_headers.ip_protocol,
		 IPPROTO_TCP);

	ft_ix = &ai->ft_ix[MLX5E_TT_IPV4_TCP];
	if (tt_vec & BIT(MLX5E_TT_IPV4_TCP)) {
		MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
			 ETH_P_IP);
		MLX5_SET(dest_format_struct, dest, destination_id,
			 tirn[MLX5E_TT_IPV4_TCP]);
		err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
						match_criteria, flow_context,
						ft_ix);
		if (err)
			goto err_del_ai;

		ai->tt_vec |= BIT(MLX5E_TT_IPV4_TCP);
	}

	ft_ix = &ai->ft_ix[MLX5E_TT_IPV6_TCP];
	if (tt_vec & BIT(MLX5E_TT_IPV6_TCP)) {
		MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
			 ETH_P_IPV6);
		MLX5_SET(dest_format_struct, dest, destination_id,
			 tirn[MLX5E_TT_IPV6_TCP]);
		err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
						match_criteria, flow_context,
						ft_ix);
		if (err)
			goto err_del_ai;

		ai->tt_vec |= BIT(MLX5E_TT_IPV6_TCP);
	}

	MLX5_SET(fte_match_param, match_value, outer_headers.ip_protocol,
		 IPPROTO_AH);

	ft_ix = &ai->ft_ix[MLX5E_TT_IPV4_IPSEC_AH];
	if (tt_vec & BIT(MLX5E_TT_IPV4_IPSEC_AH)) {
		MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
			 ETH_P_IP);
		MLX5_SET(dest_format_struct, dest, destination_id,
			 tirn[MLX5E_TT_IPV4_IPSEC_AH]);
		err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
						match_criteria, flow_context,
						ft_ix);
		if (err)
			goto err_del_ai;

		ai->tt_vec |= BIT(MLX5E_TT_IPV4_IPSEC_AH);
	}

	ft_ix = &ai->ft_ix[MLX5E_TT_IPV6_IPSEC_AH];
	if (tt_vec & BIT(MLX5E_TT_IPV6_IPSEC_AH)) {
		MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
			 ETH_P_IPV6);
		MLX5_SET(dest_format_struct, dest, destination_id,
			 tirn[MLX5E_TT_IPV6_IPSEC_AH]);
		err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
						match_criteria, flow_context,
						ft_ix);
		if (err)
			goto err_del_ai;

		ai->tt_vec |= BIT(MLX5E_TT_IPV6_IPSEC_AH);
	}

	MLX5_SET(fte_match_param, match_value, outer_headers.ip_protocol,
		 IPPROTO_ESP);

	ft_ix = &ai->ft_ix[MLX5E_TT_IPV4_IPSEC_ESP];
	if (tt_vec & BIT(MLX5E_TT_IPV4_IPSEC_ESP)) {
		MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
			 ETH_P_IP);
		MLX5_SET(dest_format_struct, dest, destination_id,
			 tirn[MLX5E_TT_IPV4_IPSEC_ESP]);
		err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
						match_criteria, flow_context,
						ft_ix);
		if (err)
			goto err_del_ai;

		ai->tt_vec |= BIT(MLX5E_TT_IPV4_IPSEC_ESP);
	}

	ft_ix = &ai->ft_ix[MLX5E_TT_IPV6_IPSEC_ESP];
	if (tt_vec & BIT(MLX5E_TT_IPV6_IPSEC_ESP)) {
		MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
			 ETH_P_IPV6);
		MLX5_SET(dest_format_struct, dest, destination_id,
			 tirn[MLX5E_TT_IPV6_IPSEC_ESP]);
		err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
						match_criteria, flow_context,
						ft_ix);
		if (err)
			goto err_del_ai;

		ai->tt_vec |= BIT(MLX5E_TT_IPV6_IPSEC_ESP);
	}

	return 0;

err_del_ai:
	mlx5e_del_eth_addr_from_flow_table(priv, ai);

	return err;
}

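/* Allocate the flow context and match criteria scratch buffers and install
 * the steering rules for @ai.
 */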
static int mlx5e_add_eth_addr_rule(struct mlx5e_priv *priv,
				   struct mlx5e_eth_addr_info *ai, int type)
{
	u32 *flow_context;
	u32 *match_criteria;
	int err;

	flow_context   = mlx5_vzalloc(MLX5_ST_SZ_BYTES(flow_context) +
				      MLX5_ST_SZ_BYTES(dest_format_struct));
	match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
	if (!flow_context || !match_criteria) {
		netdev_err(priv->netdev, "%s: alloc failed\n", __func__);
		err = -ENOMEM;
		goto add_eth_addr_rule_out;
	}

	err = __mlx5e_add_eth_addr_rule(priv, ai, type, flow_context,
					match_criteria);
	if (err)
		netdev_err(priv->netdev, "%s: failed\n", __func__);

add_eth_addr_rule_out:
	kvfree(match_criteria);
	kvfree(flow_context);
	return err;
}

enum mlx5e_vlan_rule_type {
	MLX5E_VLAN_RULE_TYPE_UNTAGGED,
	MLX5E_VLAN_RULE_TYPE_ANY_VID,
	MLX5E_VLAN_RULE_TYPE_MATCH_VID,
};

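/* Add a VLAN flow table rule forwarding matching traffic (untagged, any
 * VLAN id, or one specific VLAN id) to the main flow table.
 */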
static int mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
			       enum mlx5e_vlan_rule_type rule_type, u16 vid)
{
	u8 match_criteria_enable = 0;
	u32 *flow_context;
	void *match_value;
	void *dest;
	u32 *match_criteria;
	u32 *ft_ix;
	int err;

	flow_context   = mlx5_vzalloc(MLX5_ST_SZ_BYTES(flow_context) +
				      MLX5_ST_SZ_BYTES(dest_format_struct));
	match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
	if (!flow_context || !match_criteria) {
		netdev_err(priv->netdev, "%s: alloc failed\n", __func__);
		err = -ENOMEM;
		goto add_vlan_rule_out;
	}
	match_value = MLX5_ADDR_OF(flow_context, flow_context, match_value);
	dest = MLX5_ADDR_OF(flow_context, flow_context, destination);

	MLX5_SET(flow_context, flow_context, action,
		 MLX5_FLOW_CONTEXT_ACTION_FWD_DEST);
	MLX5_SET(flow_context, flow_context, destination_list_size, 1);
	MLX5_SET(dest_format_struct, dest, destination_type,
		 MLX5_FLOW_CONTEXT_DEST_TYPE_FLOW_TABLE);
	MLX5_SET(dest_format_struct, dest, destination_id,
		 mlx5_get_flow_table_id(priv->ft.main));

	match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	MLX5_SET_TO_ONES(fte_match_param, match_criteria,
			 outer_headers.vlan_tag);

	switch (rule_type) {
	case MLX5E_VLAN_RULE_TYPE_UNTAGGED:
		ft_ix = &priv->vlan.untagged_rule_ft_ix;
		break;
	case MLX5E_VLAN_RULE_TYPE_ANY_VID:
		ft_ix = &priv->vlan.any_vlan_rule_ft_ix;
		MLX5_SET(fte_match_param, match_value, outer_headers.vlan_tag,
			 1);
		break;
	default: /* MLX5E_VLAN_RULE_TYPE_MATCH_VID */
		ft_ix = &priv->vlan.active_vlans_ft_ix[vid];
		MLX5_SET(fte_match_param, match_value, outer_headers.vlan_tag,
			 1);
		MLX5_SET_TO_ONES(fte_match_param, match_criteria,
				 outer_headers.first_vid);
		MLX5_SET(fte_match_param, match_value, outer_headers.first_vid,
			 vid);
		break;
	}

	err = mlx5_add_flow_table_entry(priv->ft.vlan, match_criteria_enable,
					match_criteria, flow_context, ft_ix);
	if (err)
		netdev_err(priv->netdev, "%s: failed\n", __func__);

add_vlan_rule_out:
	kvfree(match_criteria);
	kvfree(flow_context);
	return err;
}

static void mlx5e_del_vlan_rule(struct mlx5e_priv *priv,
				enum mlx5e_vlan_rule_type rule_type, u16 vid)
{
	switch (rule_type) {
	case MLX5E_VLAN_RULE_TYPE_UNTAGGED:
		mlx5_del_flow_table_entry(priv->ft.vlan,
					  priv->vlan.untagged_rule_ft_ix);
		break;
	case MLX5E_VLAN_RULE_TYPE_ANY_VID:
		mlx5_del_flow_table_entry(priv->ft.vlan,
					  priv->vlan.any_vlan_rule_ft_ix);
		break;
	case MLX5E_VLAN_RULE_TYPE_MATCH_VID:
		mlx5_del_flow_table_entry(priv->ft.vlan,
					  priv->vlan.active_vlans_ft_ix[vid]);
		break;
	}
}

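/* Re-enable VLAN filtering by removing the "any VLAN id" catch-all rule,
 * unless promiscuous mode still needs it.
 */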
void mlx5e_enable_vlan_filter(struct mlx5e_priv *priv)
{
	if (!priv->vlan.filter_disabled)
		return;

	priv->vlan.filter_disabled = false;
	if (priv->netdev->flags & IFF_PROMISC)
		return;
	mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, 0);
}

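/* Disable VLAN filtering by installing an "any VLAN id" catch-all rule,
 * unless promiscuous mode already installed it.
 */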
void mlx5e_disable_vlan_filter(struct mlx5e_priv *priv)
{
	if (priv->vlan.filter_disabled)
		return;

	priv->vlan.filter_disabled = true;
	if (priv->netdev->flags & IFF_PROMISC)
		return;
	mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, 0);
}

int mlx5e_vlan_rx_add_vid(struct net_device *dev, __always_unused __be16 proto,
			  u16 vid)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	return mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, vid);
}

int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __always_unused __be16 proto,
			   u16 vid)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, vid);

	return 0;
}

#define mlx5e_for_each_hash_node(hn, tmp, hash, i) \
	for (i = 0; i < MLX5E_ETH_ADDR_HASH_SIZE; i++) \
		hlist_for_each_entry_safe(hn, tmp, &hash[i], hlist)

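/* Apply the pending action recorded on a hash node: install steering rules
 * for a newly added address, or remove the rules and free the node for a
 * deleted one.
 */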
static void mlx5e_execute_action(struct mlx5e_priv *priv,
				 struct mlx5e_eth_addr_hash_node *hn)
{
	switch (hn->action) {
	case MLX5E_ACTION_ADD:
		mlx5e_add_eth_addr_rule(priv, &hn->ai, MLX5E_FULLMATCH);
		hn->action = MLX5E_ACTION_NONE;
		break;

	case MLX5E_ACTION_DEL:
		mlx5e_del_eth_addr_from_flow_table(priv, &hn->ai);
		mlx5e_del_eth_addr_from_hash(hn);
		break;
	}
}

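/* Snapshot the netdev's unicast and multicast address lists (plus its own
 * dev_addr) into the driver hash tables, under the netdev address lock.
 */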
static void mlx5e_sync_netdev_addr(struct mlx5e_priv *priv)
{
	struct net_device *netdev = priv->netdev;
	struct netdev_hw_addr *ha;

	netif_addr_lock_bh(netdev);

	mlx5e_add_eth_addr_to_hash(priv->eth_addr.netdev_uc,
				   priv->netdev->dev_addr);

	netdev_for_each_uc_addr(ha, netdev)
		mlx5e_add_eth_addr_to_hash(priv->eth_addr.netdev_uc, ha->addr);

	netdev_for_each_mc_addr(ha, netdev)
		mlx5e_add_eth_addr_to_hash(priv->eth_addr.netdev_mc, ha->addr);

	netif_addr_unlock_bh(netdev);
}

static void mlx5e_apply_netdev_addr(struct mlx5e_priv *priv)
{
	struct mlx5e_eth_addr_hash_node *hn;
	struct hlist_node *tmp;
	int i;

	mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.netdev_uc, i)
		mlx5e_execute_action(priv, hn);

	mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.netdev_mc, i)
		mlx5e_execute_action(priv, hn);
}

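/* Mark every known address for deletion, re-sync from the netdev so that
 * still-present addresses flip back to NONE/ADD, then execute the
 * resulting add/delete actions.
 */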
static void mlx5e_handle_netdev_addr(struct mlx5e_priv *priv)
{
	struct mlx5e_eth_addr_hash_node *hn;
	struct hlist_node *tmp;
	int i;

	mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.netdev_uc, i)
		hn->action = MLX5E_ACTION_DEL;
	mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.netdev_mc, i)
		hn->action = MLX5E_ACTION_DEL;

	if (!test_bit(MLX5E_STATE_DESTROYING, &priv->state))
		mlx5e_sync_netdev_addr(priv);

	mlx5e_apply_netdev_addr(priv);
}

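/* Work handler reconciling device steering state with the netdev flags:
 * compute the desired promiscuous/allmulti/broadcast state and apply only
 * the delta against what is currently enabled.
 */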
void mlx5e_set_rx_mode_work(struct work_struct *work)
{
	struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
					       set_rx_mode_work);

	struct mlx5e_eth_addr_db *ea = &priv->eth_addr;
	struct net_device *ndev = priv->netdev;

	bool rx_mode_enable   = !test_bit(MLX5E_STATE_DESTROYING, &priv->state);
	bool promisc_enabled   = rx_mode_enable && (ndev->flags & IFF_PROMISC);
	bool allmulti_enabled  = rx_mode_enable && (ndev->flags & IFF_ALLMULTI);
	bool broadcast_enabled = rx_mode_enable;

	bool enable_promisc    = !ea->promisc_enabled   &&  promisc_enabled;
	bool disable_promisc   =  ea->promisc_enabled   && !promisc_enabled;
	bool enable_allmulti   = !ea->allmulti_enabled  &&  allmulti_enabled;
	bool disable_allmulti  =  ea->allmulti_enabled  && !allmulti_enabled;
	bool enable_broadcast  = !ea->broadcast_enabled &&  broadcast_enabled;
	bool disable_broadcast =  ea->broadcast_enabled && !broadcast_enabled;

	if (enable_promisc) {
		mlx5e_add_eth_addr_rule(priv, &ea->promisc, MLX5E_PROMISC);
		if (!priv->vlan.filter_disabled)
			mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID,
					    0);
	}
	if (enable_allmulti)
		mlx5e_add_eth_addr_rule(priv, &ea->allmulti, MLX5E_ALLMULTI);
	if (enable_broadcast)
		mlx5e_add_eth_addr_rule(priv, &ea->broadcast, MLX5E_FULLMATCH);

	mlx5e_handle_netdev_addr(priv);

	if (disable_broadcast)
		mlx5e_del_eth_addr_from_flow_table(priv, &ea->broadcast);
	if (disable_allmulti)
		mlx5e_del_eth_addr_from_flow_table(priv, &ea->allmulti);
	if (disable_promisc) {
		if (!priv->vlan.filter_disabled)
			mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID,
					    0);
		mlx5e_del_eth_addr_from_flow_table(priv, &ea->promisc);
	}

	ea->promisc_enabled   = promisc_enabled;
	ea->allmulti_enabled  = allmulti_enabled;
	ea->broadcast_enabled = broadcast_enabled;
}

void mlx5e_init_eth_addr(struct mlx5e_priv *priv)
{
	ether_addr_copy(priv->eth_addr.broadcast.addr, priv->netdev->broadcast);
}

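/* Create the main RX flow table with nine match groups: no-DMAC matches
 * (promiscuous rules), exact-DMAC matches and multicast-prefix matches,
 * each at three levels of ethertype/IP protocol specificity.
 */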
static int mlx5e_create_main_flow_table(struct mlx5e_priv *priv)
{
	struct mlx5_flow_table_group *g;
	u8 *dmac;

	g = kcalloc(9, sizeof(*g), GFP_KERNEL);
	if (!g)
		return -ENOMEM;

	g[0].log_sz = 3;
	g[0].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	MLX5_SET_TO_ONES(fte_match_param, g[0].match_criteria,
			 outer_headers.ethertype);
	MLX5_SET_TO_ONES(fte_match_param, g[0].match_criteria,
			 outer_headers.ip_protocol);

	g[1].log_sz = 1;
	g[1].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	MLX5_SET_TO_ONES(fte_match_param, g[1].match_criteria,
			 outer_headers.ethertype);

	g[2].log_sz = 0;

	g[3].log_sz = 14;
	g[3].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	dmac = MLX5_ADDR_OF(fte_match_param, g[3].match_criteria,
			    outer_headers.dmac_47_16);
	memset(dmac, 0xff, ETH_ALEN);
	MLX5_SET_TO_ONES(fte_match_param, g[3].match_criteria,
			 outer_headers.ethertype);
	MLX5_SET_TO_ONES(fte_match_param, g[3].match_criteria,
			 outer_headers.ip_protocol);

	g[4].log_sz = 13;
	g[4].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	dmac = MLX5_ADDR_OF(fte_match_param, g[4].match_criteria,
			    outer_headers.dmac_47_16);
	memset(dmac, 0xff, ETH_ALEN);
	MLX5_SET_TO_ONES(fte_match_param, g[4].match_criteria,
			 outer_headers.ethertype);

	g[5].log_sz = 11;
	g[5].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	dmac = MLX5_ADDR_OF(fte_match_param, g[5].match_criteria,
			    outer_headers.dmac_47_16);
	memset(dmac, 0xff, ETH_ALEN);

	g[6].log_sz = 2;
	g[6].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	dmac = MLX5_ADDR_OF(fte_match_param, g[6].match_criteria,
			    outer_headers.dmac_47_16);
	dmac[0] = 0x01;
	MLX5_SET_TO_ONES(fte_match_param, g[6].match_criteria,
			 outer_headers.ethertype);
	MLX5_SET_TO_ONES(fte_match_param, g[6].match_criteria,
			 outer_headers.ip_protocol);

	g[7].log_sz = 1;
	g[7].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	dmac = MLX5_ADDR_OF(fte_match_param, g[7].match_criteria,
			    outer_headers.dmac_47_16);
	dmac[0] = 0x01;
	MLX5_SET_TO_ONES(fte_match_param, g[7].match_criteria,
			 outer_headers.ethertype);

	g[8].log_sz = 0;
	g[8].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	dmac = MLX5_ADDR_OF(fte_match_param, g[8].match_criteria,
			    outer_headers.dmac_47_16);
	dmac[0] = 0x01;
	priv->ft.main = mlx5_create_flow_table(priv->mdev, 1,
					       MLX5_FLOW_TABLE_TYPE_NIC_RCV,
					       9, g);
	kfree(g);

	return priv->ft.main ? 0 : -ENOMEM;
}

static void mlx5e_destroy_main_flow_table(struct mlx5e_priv *priv)
{
	mlx5_destroy_flow_table(priv->ft.main);
}

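/* Create the VLAN flow table: one group matching on VLAN tag presence and
 * first VLAN id, and one matching on VLAN tag presence only (for the
 * untagged and any-VID rules).
 */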
static int mlx5e_create_vlan_flow_table(struct mlx5e_priv *priv)
{
	struct mlx5_flow_table_group *g;

	g = kcalloc(2, sizeof(*g), GFP_KERNEL);
	if (!g)
		return -ENOMEM;

	g[0].log_sz = 12;
	g[0].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	MLX5_SET_TO_ONES(fte_match_param, g[0].match_criteria,
			 outer_headers.vlan_tag);
	MLX5_SET_TO_ONES(fte_match_param, g[0].match_criteria,
			 outer_headers.first_vid);

	/* untagged + any vlan id */
	g[1].log_sz = 1;
	g[1].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	MLX5_SET_TO_ONES(fte_match_param, g[1].match_criteria,
			 outer_headers.vlan_tag);

	priv->ft.vlan = mlx5_create_flow_table(priv->mdev, 0,
					       MLX5_FLOW_TABLE_TYPE_NIC_RCV,
					       2, g);

	kfree(g);
	return priv->ft.vlan ? 0 : -ENOMEM;
}

static void mlx5e_destroy_vlan_flow_table(struct mlx5e_priv *priv)
{
	mlx5_destroy_flow_table(priv->ft.vlan);
}

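/* Create the main and VLAN RX flow tables and install the default rule
 * steering untagged traffic into the main table.
 */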
int mlx5e_create_flow_tables(struct mlx5e_priv *priv)
{
	int err;

	err = mlx5e_create_main_flow_table(priv);
	if (err)
		return err;

	err = mlx5e_create_vlan_flow_table(priv);
	if (err)
		goto err_destroy_main_flow_table;

	err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);
	if (err)
		goto err_destroy_vlan_flow_table;

	return 0;

err_destroy_vlan_flow_table:
	mlx5e_destroy_vlan_flow_table(priv);

err_destroy_main_flow_table:
	mlx5e_destroy_main_flow_table(priv);

	return err;
}

void mlx5e_destroy_flow_tables(struct mlx5e_priv *priv)
{
	mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);
	mlx5e_destroy_vlan_flow_table(priv);
	mlx5e_destroy_main_flow_table(priv);
}