1/*
2 * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses.  You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 *     Redistribution and use in source and binary forms, with or
11 *     without modification, are permitted provided that the following
12 *     conditions are met:
13 *
14 *      - Redistributions of source code must retain the above
15 *        copyright notice, this list of conditions and the following
16 *        disclaimer.
17 *
18 *      - Redistributions in binary form must reproduce the above
19 *        copyright notice, this list of conditions and the following
20 *        disclaimer in the documentation and/or other materials
21 *        provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 *
32 */
33
34#include <linux/etherdevice.h>
35#include <linux/tcp.h>
36#include <linux/if_vlan.h>
37#include <linux/delay.h>
38#include <linux/slab.h>
39#include <linux/hash.h>
40#include <net/ip.h>
41#include <net/busy_poll.h>
42#include <net/vxlan.h>
43
44#include <linux/mlx4/driver.h>
45#include <linux/mlx4/device.h>
46#include <linux/mlx4/cmd.h>
47#include <linux/mlx4/cq.h>
48
49#include "mlx4_en.h"
50#include "en_port.h"
51
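/* Partition the netdev Tx queues into 'up' traffic classes, giving each
 * class priv->num_tx_rings_p_up consecutive queues; 'up' may only be 0
 * (no traffic classes) or MLX4_EN_NUM_UP.
 */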
52int mlx4_en_setup_tc(struct net_device *dev, u8 up)
53{
54	struct mlx4_en_priv *priv = netdev_priv(dev);
55	int i;
56	unsigned int offset = 0;
57
58	if (up && up != MLX4_EN_NUM_UP)
59		return -EINVAL;
60
61	netdev_set_num_tc(dev, up);
62
	/* Partition Tx queues evenly among the user priorities (UPs) */
64	for (i = 0; i < up; i++) {
65		netdev_set_tc_queue(dev, i, priv->num_tx_rings_p_up, offset);
66		offset += priv->num_tx_rings_p_up;
67	}
68
69	return 0;
70}
71
72#ifdef CONFIG_NET_RX_BUSY_POLL
73/* must be called with local_bh_disable()d */
74static int mlx4_en_low_latency_recv(struct napi_struct *napi)
75{
76	struct mlx4_en_cq *cq = container_of(napi, struct mlx4_en_cq, napi);
77	struct net_device *dev = cq->dev;
78	struct mlx4_en_priv *priv = netdev_priv(dev);
79	struct mlx4_en_rx_ring *rx_ring = priv->rx_ring[cq->ring];
80	int done;
81
82	if (!priv->port_up)
83		return LL_FLUSH_FAILED;
84
85	if (!mlx4_en_cq_lock_poll(cq))
86		return LL_FLUSH_BUSY;
87
88	done = mlx4_en_process_rx_cq(dev, cq, 4);
89	if (likely(done))
90		rx_ring->cleaned += done;
91	else
92		rx_ring->misses++;
93
94	mlx4_en_cq_unlock_poll(cq);
95
96	return done;
97}
98#endif	/* CONFIG_NET_RX_BUSY_POLL */
99
100#ifdef CONFIG_RFS_ACCEL
101
102struct mlx4_en_filter {
103	struct list_head next;
104	struct work_struct work;
105
106	u8     ip_proto;
107	__be32 src_ip;
108	__be32 dst_ip;
109	__be16 src_port;
110	__be16 dst_port;
111
112	int rxq_index;
113	struct mlx4_en_priv *priv;
114	u32 flow_id;			/* RFS infrastructure id */
115	int id;				/* mlx4_en driver id */
116	u64 reg_id;			/* Flow steering API id */
117	u8 activated;			/* Used to prevent expiry before filter
118					 * is attached
119					 */
120	struct hlist_node filter_chain;
121};
122
123static void mlx4_en_filter_rfs_expire(struct mlx4_en_priv *priv);
124
125static enum mlx4_net_trans_rule_id mlx4_ip_proto_to_trans_rule_id(u8 ip_proto)
126{
127	switch (ip_proto) {
128	case IPPROTO_UDP:
129		return MLX4_NET_TRANS_RULE_ID_UDP;
130	case IPPROTO_TCP:
131		return MLX4_NET_TRANS_RULE_ID_TCP;
132	default:
133		return MLX4_NET_TRANS_RULE_NUM;
134	}
}
136
137static void mlx4_en_filter_work(struct work_struct *work)
138{
139	struct mlx4_en_filter *filter = container_of(work,
140						     struct mlx4_en_filter,
141						     work);
142	struct mlx4_en_priv *priv = filter->priv;
143	struct mlx4_spec_list spec_tcp_udp = {
144		.id = mlx4_ip_proto_to_trans_rule_id(filter->ip_proto),
145		{
146			.tcp_udp = {
147				.dst_port = filter->dst_port,
148				.dst_port_msk = (__force __be16)-1,
149				.src_port = filter->src_port,
150				.src_port_msk = (__force __be16)-1,
151			},
152		},
153	};
154	struct mlx4_spec_list spec_ip = {
155		.id = MLX4_NET_TRANS_RULE_ID_IPV4,
156		{
157			.ipv4 = {
158				.dst_ip = filter->dst_ip,
159				.dst_ip_msk = (__force __be32)-1,
160				.src_ip = filter->src_ip,
161				.src_ip_msk = (__force __be32)-1,
162			},
163		},
164	};
165	struct mlx4_spec_list spec_eth = {
166		.id = MLX4_NET_TRANS_RULE_ID_ETH,
167	};
168	struct mlx4_net_trans_rule rule = {
169		.list = LIST_HEAD_INIT(rule.list),
170		.queue_mode = MLX4_NET_TRANS_Q_LIFO,
171		.exclusive = 1,
172		.allow_loopback = 1,
173		.promisc_mode = MLX4_FS_REGULAR,
174		.port = priv->port,
175		.priority = MLX4_DOMAIN_RFS,
176	};
177	int rc;
178	__be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16);
179
180	if (spec_tcp_udp.id >= MLX4_NET_TRANS_RULE_NUM) {
181		en_warn(priv, "RFS: ignoring unsupported ip protocol (%d)\n",
182			filter->ip_proto);
183		goto ignore;
184	}
185	list_add_tail(&spec_eth.list, &rule.list);
186	list_add_tail(&spec_ip.list, &rule.list);
187	list_add_tail(&spec_tcp_udp.list, &rule.list);
188
189	rule.qpn = priv->rss_map.qps[filter->rxq_index].qpn;
190	memcpy(spec_eth.eth.dst_mac, priv->dev->dev_addr, ETH_ALEN);
191	memcpy(spec_eth.eth.dst_mac_msk, &mac_mask, ETH_ALEN);
192
193	filter->activated = 0;
194
195	if (filter->reg_id) {
196		rc = mlx4_flow_detach(priv->mdev->dev, filter->reg_id);
197		if (rc && rc != -ENOENT)
198			en_err(priv, "Error detaching flow. rc = %d\n", rc);
199	}
200
201	rc = mlx4_flow_attach(priv->mdev->dev, &rule, &filter->reg_id);
202	if (rc)
203		en_err(priv, "Error attaching flow. err = %d\n", rc);
204
205ignore:
206	mlx4_en_filter_rfs_expire(priv);
207
208	filter->activated = 1;
209}
210
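/* Hash the flow 4-tuple (addresses and ports) to select the
 * priv->filter_hash bucket that tracks its RFS filter.
 */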
211static inline struct hlist_head *
212filter_hash_bucket(struct mlx4_en_priv *priv, __be32 src_ip, __be32 dst_ip,
213		   __be16 src_port, __be16 dst_port)
214{
215	unsigned long l;
216	int bucket_idx;
217
218	l = (__force unsigned long)src_port |
219	    ((__force unsigned long)dst_port << 2);
220	l ^= (__force unsigned long)(src_ip ^ dst_ip);
221
222	bucket_idx = hash_long(l, MLX4_EN_FILTER_HASH_SHIFT);
223
224	return &priv->filter_hash[bucket_idx];
225}
226
227static struct mlx4_en_filter *
228mlx4_en_filter_alloc(struct mlx4_en_priv *priv, int rxq_index, __be32 src_ip,
229		     __be32 dst_ip, u8 ip_proto, __be16 src_port,
230		     __be16 dst_port, u32 flow_id)
231{
232	struct mlx4_en_filter *filter = NULL;
233
234	filter = kzalloc(sizeof(struct mlx4_en_filter), GFP_ATOMIC);
235	if (!filter)
236		return NULL;
237
238	filter->priv = priv;
239	filter->rxq_index = rxq_index;
240	INIT_WORK(&filter->work, mlx4_en_filter_work);
241
242	filter->src_ip = src_ip;
243	filter->dst_ip = dst_ip;
244	filter->ip_proto = ip_proto;
245	filter->src_port = src_port;
246	filter->dst_port = dst_port;
247
248	filter->flow_id = flow_id;
249
250	filter->id = priv->last_filter_id++ % RPS_NO_FILTER;
251
252	list_add_tail(&filter->next, &priv->filters);
253	hlist_add_head(&filter->filter_chain,
254		       filter_hash_bucket(priv, src_ip, dst_ip, src_port,
255					  dst_port));
256
257	return filter;
258}
259
260static void mlx4_en_filter_free(struct mlx4_en_filter *filter)
261{
262	struct mlx4_en_priv *priv = filter->priv;
263	int rc;
264
265	list_del(&filter->next);
266
267	rc = mlx4_flow_detach(priv->mdev->dev, filter->reg_id);
268	if (rc && rc != -ENOENT)
269		en_err(priv, "Error detaching flow. rc = %d\n", rc);
270
271	kfree(filter);
272}
273
274static inline struct mlx4_en_filter *
275mlx4_en_filter_find(struct mlx4_en_priv *priv, __be32 src_ip, __be32 dst_ip,
276		    u8 ip_proto, __be16 src_port, __be16 dst_port)
277{
278	struct mlx4_en_filter *filter;
279	struct mlx4_en_filter *ret = NULL;
280
281	hlist_for_each_entry(filter,
282			     filter_hash_bucket(priv, src_ip, dst_ip,
283						src_port, dst_port),
284			     filter_chain) {
285		if (filter->src_ip == src_ip &&
286		    filter->dst_ip == dst_ip &&
287		    filter->ip_proto == ip_proto &&
288		    filter->src_port == src_port &&
289		    filter->dst_port == dst_port) {
290			ret = filter;
291			break;
292		}
293	}
294
295	return ret;
296}
297
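/* RFS flow steering callback: parse the IPv4 TCP/UDP headers of the
 * sampled skb, find or allocate a matching filter and queue work to
 * attach (or re-attach) its steering rule to the requested RX queue.
 */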
298static int
299mlx4_en_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
300		   u16 rxq_index, u32 flow_id)
301{
302	struct mlx4_en_priv *priv = netdev_priv(net_dev);
303	struct mlx4_en_filter *filter;
304	const struct iphdr *ip;
305	const __be16 *ports;
306	u8 ip_proto;
307	__be32 src_ip;
308	__be32 dst_ip;
309	__be16 src_port;
310	__be16 dst_port;
311	int nhoff = skb_network_offset(skb);
312	int ret = 0;
313
314	if (skb->protocol != htons(ETH_P_IP))
315		return -EPROTONOSUPPORT;
316
317	ip = (const struct iphdr *)(skb->data + nhoff);
318	if (ip_is_fragment(ip))
319		return -EPROTONOSUPPORT;
320
321	if ((ip->protocol != IPPROTO_TCP) && (ip->protocol != IPPROTO_UDP))
322		return -EPROTONOSUPPORT;
323	ports = (const __be16 *)(skb->data + nhoff + 4 * ip->ihl);
324
325	ip_proto = ip->protocol;
326	src_ip = ip->saddr;
327	dst_ip = ip->daddr;
328	src_port = ports[0];
329	dst_port = ports[1];
330
331	spin_lock_bh(&priv->filters_lock);
332	filter = mlx4_en_filter_find(priv, src_ip, dst_ip, ip_proto,
333				     src_port, dst_port);
334	if (filter) {
335		if (filter->rxq_index == rxq_index)
336			goto out;
337
338		filter->rxq_index = rxq_index;
339	} else {
340		filter = mlx4_en_filter_alloc(priv, rxq_index,
341					      src_ip, dst_ip, ip_proto,
342					      src_port, dst_port, flow_id);
343		if (!filter) {
344			ret = -ENOMEM;
345			goto err;
346		}
347	}
348
349	queue_work(priv->mdev->workqueue, &filter->work);
350
351out:
352	ret = filter->id;
353err:
354	spin_unlock_bh(&priv->filters_lock);
355
356	return ret;
357}
358
359void mlx4_en_cleanup_filters(struct mlx4_en_priv *priv)
360{
361	struct mlx4_en_filter *filter, *tmp;
362	LIST_HEAD(del_list);
363
364	spin_lock_bh(&priv->filters_lock);
365	list_for_each_entry_safe(filter, tmp, &priv->filters, next) {
366		list_move(&filter->next, &del_list);
367		hlist_del(&filter->filter_chain);
368	}
369	spin_unlock_bh(&priv->filters_lock);
370
371	list_for_each_entry_safe(filter, tmp, &del_list, next) {
372		cancel_work_sync(&filter->work);
373		mlx4_en_filter_free(filter);
374	}
375}
376
377static void mlx4_en_filter_rfs_expire(struct mlx4_en_priv *priv)
378{
379	struct mlx4_en_filter *filter = NULL, *tmp, *last_filter = NULL;
380	LIST_HEAD(del_list);
381	int i = 0;
382
383	spin_lock_bh(&priv->filters_lock);
384	list_for_each_entry_safe(filter, tmp, &priv->filters, next) {
385		if (i > MLX4_EN_FILTER_EXPIRY_QUOTA)
386			break;
387
388		if (filter->activated &&
389		    !work_pending(&filter->work) &&
390		    rps_may_expire_flow(priv->dev,
391					filter->rxq_index, filter->flow_id,
392					filter->id)) {
393			list_move(&filter->next, &del_list);
394			hlist_del(&filter->filter_chain);
395		} else
396			last_filter = filter;
397
398		i++;
399	}
400
401	if (last_filter && (&last_filter->next != priv->filters.next))
402		list_move(&priv->filters, &last_filter->next);
403
404	spin_unlock_bh(&priv->filters_lock);
405
406	list_for_each_entry_safe(filter, tmp, &del_list, next)
407		mlx4_en_filter_free(filter);
408}
409#endif
410
411static int mlx4_en_vlan_rx_add_vid(struct net_device *dev,
412				   __be16 proto, u16 vid)
413{
414	struct mlx4_en_priv *priv = netdev_priv(dev);
415	struct mlx4_en_dev *mdev = priv->mdev;
416	int err;
417	int idx;
418
419	en_dbg(HW, priv, "adding VLAN:%d\n", vid);
420
421	set_bit(vid, priv->active_vlans);
422
423	/* Add VID to port VLAN filter */
424	mutex_lock(&mdev->state_lock);
425	if (mdev->device_up && priv->port_up) {
426		err = mlx4_SET_VLAN_FLTR(mdev->dev, priv);
427		if (err)
428			en_err(priv, "Failed configuring VLAN filter\n");
429	}
430	if (mlx4_register_vlan(mdev->dev, priv->port, vid, &idx))
431		en_dbg(HW, priv, "failed adding vlan %d\n", vid);
432	mutex_unlock(&mdev->state_lock);
433
434	return 0;
435}
436
437static int mlx4_en_vlan_rx_kill_vid(struct net_device *dev,
438				    __be16 proto, u16 vid)
439{
440	struct mlx4_en_priv *priv = netdev_priv(dev);
441	struct mlx4_en_dev *mdev = priv->mdev;
442	int err;
443
444	en_dbg(HW, priv, "Killing VID:%d\n", vid);
445
446	clear_bit(vid, priv->active_vlans);
447
448	/* Remove VID from port VLAN filter */
449	mutex_lock(&mdev->state_lock);
450	mlx4_unregister_vlan(mdev->dev, priv->port, vid);
451
452	if (mdev->device_up && priv->port_up) {
453		err = mlx4_SET_VLAN_FLTR(mdev->dev, priv);
454		if (err)
455			en_err(priv, "Failed configuring VLAN filter\n");
456	}
457	mutex_unlock(&mdev->state_lock);
458
459	return 0;
460}
461
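/* Expand a u64 MAC value into a byte array, most significant byte first,
 * and zero the two trailing padding bytes.
 */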
462static void mlx4_en_u64_to_mac(unsigned char dst_mac[ETH_ALEN + 2], u64 src_mac)
463{
464	int i;
465	for (i = ETH_ALEN - 1; i >= 0; --i) {
466		dst_mac[i] = src_mac & 0xff;
467		src_mac >>= 8;
468	}
469	memset(&dst_mac[ETH_ALEN], 0, 2);
470}
471
472
473static int mlx4_en_tunnel_steer_add(struct mlx4_en_priv *priv, unsigned char *addr,
474				    int qpn, u64 *reg_id)
475{
476	int err;
477
478	if (priv->mdev->dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN ||
479	    priv->mdev->dev->caps.dmfs_high_steer_mode == MLX4_STEERING_DMFS_A0_STATIC)
480		return 0; /* do nothing */
481
482	err = mlx4_tunnel_steer_add(priv->mdev->dev, addr, priv->port, qpn,
483				    MLX4_DOMAIN_NIC, reg_id);
484	if (err) {
485		en_err(priv, "failed to add vxlan steering rule, err %d\n", err);
486		return err;
487	}
488	en_dbg(DRV, priv, "added vxlan steering rule, mac %pM reg_id %llx\n", addr, *reg_id);
489	return 0;
490}
491
492
493static int mlx4_en_uc_steer_add(struct mlx4_en_priv *priv,
494				unsigned char *mac, int *qpn, u64 *reg_id)
495{
496	struct mlx4_en_dev *mdev = priv->mdev;
497	struct mlx4_dev *dev = mdev->dev;
498	int err;
499
500	switch (dev->caps.steering_mode) {
501	case MLX4_STEERING_MODE_B0: {
502		struct mlx4_qp qp;
503		u8 gid[16] = {0};
504
505		qp.qpn = *qpn;
506		memcpy(&gid[10], mac, ETH_ALEN);
507		gid[5] = priv->port;
508
509		err = mlx4_unicast_attach(dev, &qp, gid, 0, MLX4_PROT_ETH);
510		break;
511	}
512	case MLX4_STEERING_MODE_DEVICE_MANAGED: {
513		struct mlx4_spec_list spec_eth = { {NULL} };
514		__be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16);
515
516		struct mlx4_net_trans_rule rule = {
517			.queue_mode = MLX4_NET_TRANS_Q_FIFO,
518			.exclusive = 0,
519			.allow_loopback = 1,
520			.promisc_mode = MLX4_FS_REGULAR,
521			.priority = MLX4_DOMAIN_NIC,
522		};
523
524		rule.port = priv->port;
525		rule.qpn = *qpn;
526		INIT_LIST_HEAD(&rule.list);
527
528		spec_eth.id = MLX4_NET_TRANS_RULE_ID_ETH;
529		memcpy(spec_eth.eth.dst_mac, mac, ETH_ALEN);
530		memcpy(spec_eth.eth.dst_mac_msk, &mac_mask, ETH_ALEN);
531		list_add_tail(&spec_eth.list, &rule.list);
532
533		err = mlx4_flow_attach(dev, &rule, reg_id);
534		break;
535	}
536	default:
537		return -EINVAL;
538	}
539	if (err)
540		en_warn(priv, "Failed Attaching Unicast\n");
541
542	return err;
543}
544
545static void mlx4_en_uc_steer_release(struct mlx4_en_priv *priv,
546				     unsigned char *mac, int qpn, u64 reg_id)
547{
548	struct mlx4_en_dev *mdev = priv->mdev;
549	struct mlx4_dev *dev = mdev->dev;
550
551	switch (dev->caps.steering_mode) {
552	case MLX4_STEERING_MODE_B0: {
553		struct mlx4_qp qp;
554		u8 gid[16] = {0};
555
556		qp.qpn = qpn;
557		memcpy(&gid[10], mac, ETH_ALEN);
558		gid[5] = priv->port;
559
560		mlx4_unicast_detach(dev, &qp, gid, MLX4_PROT_ETH);
561		break;
562	}
563	case MLX4_STEERING_MODE_DEVICE_MANAGED: {
564		mlx4_flow_detach(dev, reg_id);
565		break;
566	}
567	default:
568		en_err(priv, "Invalid steering mode.\n");
569	}
570}
571
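/* Register the port MAC address and set up the base RX QP for it. With A0
 * steering the QP number is derived from the MAC table index; otherwise a
 * QP range is reserved and unicast/tunnel steering rules are attached.
 */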
572static int mlx4_en_get_qp(struct mlx4_en_priv *priv)
573{
574	struct mlx4_en_dev *mdev = priv->mdev;
575	struct mlx4_dev *dev = mdev->dev;
576	struct mlx4_mac_entry *entry;
577	int index = 0;
578	int err = 0;
579	u64 reg_id = 0;
580	int *qpn = &priv->base_qpn;
581	u64 mac = mlx4_mac_to_u64(priv->dev->dev_addr);
582
583	en_dbg(DRV, priv, "Registering MAC: %pM for adding\n",
584	       priv->dev->dev_addr);
585	index = mlx4_register_mac(dev, priv->port, mac);
586	if (index < 0) {
587		err = index;
588		en_err(priv, "Failed adding MAC: %pM\n",
589		       priv->dev->dev_addr);
590		return err;
591	}
592
593	if (dev->caps.steering_mode == MLX4_STEERING_MODE_A0) {
594		int base_qpn = mlx4_get_base_qpn(dev, priv->port);
595		*qpn = base_qpn + index;
596		return 0;
597	}
598
599	err = mlx4_qp_reserve_range(dev, 1, 1, qpn, MLX4_RESERVE_A0_QP);
600	en_dbg(DRV, priv, "Reserved qp %d\n", *qpn);
601	if (err) {
602		en_err(priv, "Failed to reserve qp for mac registration\n");
603		goto qp_err;
604	}
605
606	err = mlx4_en_uc_steer_add(priv, priv->dev->dev_addr, qpn, &reg_id);
607	if (err)
608		goto steer_err;
609
610	err = mlx4_en_tunnel_steer_add(priv, priv->dev->dev_addr, *qpn,
611				       &priv->tunnel_reg_id);
612	if (err)
613		goto tunnel_err;
614
615	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
616	if (!entry) {
617		err = -ENOMEM;
618		goto alloc_err;
619	}
620	memcpy(entry->mac, priv->dev->dev_addr, sizeof(entry->mac));
621	memcpy(priv->current_mac, entry->mac, sizeof(priv->current_mac));
622	entry->reg_id = reg_id;
623
624	hlist_add_head_rcu(&entry->hlist,
625			   &priv->mac_hash[entry->mac[MLX4_EN_MAC_HASH_IDX]]);
626
627	return 0;
628
629alloc_err:
630	if (priv->tunnel_reg_id)
631		mlx4_flow_detach(priv->mdev->dev, priv->tunnel_reg_id);
632tunnel_err:
633	mlx4_en_uc_steer_release(priv, priv->dev->dev_addr, *qpn, reg_id);
634
635steer_err:
636	mlx4_qp_release_range(dev, *qpn, 1);
637
638qp_err:
639	mlx4_unregister_mac(dev, priv->port, mac);
640	return err;
641}
642
643static void mlx4_en_put_qp(struct mlx4_en_priv *priv)
644{
645	struct mlx4_en_dev *mdev = priv->mdev;
646	struct mlx4_dev *dev = mdev->dev;
647	int qpn = priv->base_qpn;
648	u64 mac;
649
650	if (dev->caps.steering_mode == MLX4_STEERING_MODE_A0) {
651		mac = mlx4_mac_to_u64(priv->dev->dev_addr);
		en_dbg(DRV, priv, "Unregistering MAC: %pM\n",
		       priv->dev->dev_addr);
654		mlx4_unregister_mac(dev, priv->port, mac);
655	} else {
656		struct mlx4_mac_entry *entry;
657		struct hlist_node *tmp;
658		struct hlist_head *bucket;
659		unsigned int i;
660
661		for (i = 0; i < MLX4_EN_MAC_HASH_SIZE; ++i) {
662			bucket = &priv->mac_hash[i];
663			hlist_for_each_entry_safe(entry, tmp, bucket, hlist) {
664				mac = mlx4_mac_to_u64(entry->mac);
				en_dbg(DRV, priv, "Unregistering MAC: %pM\n",
				       entry->mac);
667				mlx4_en_uc_steer_release(priv, entry->mac,
668							 qpn, entry->reg_id);
669
670				mlx4_unregister_mac(dev, priv->port, mac);
671				hlist_del_rcu(&entry->hlist);
672				kfree_rcu(entry, rcu);
673			}
674		}
675
676		if (priv->tunnel_reg_id) {
677			mlx4_flow_detach(priv->mdev->dev, priv->tunnel_reg_id);
678			priv->tunnel_reg_id = 0;
679		}
680
681		en_dbg(DRV, priv, "Releasing qp: port %d, qpn %d\n",
682		       priv->port, qpn);
683		mlx4_qp_release_range(dev, qpn, 1);
684		priv->flags &= ~MLX4_EN_FLAG_FORCE_PROMISC;
685	}
686}
687
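/* Replace prev_mac with new_mac for the given QP. With A0 steering this is
 * a single firmware command; otherwise the old MAC registration and
 * steering rules are torn down and re-created for the new address.
 */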
688static int mlx4_en_replace_mac(struct mlx4_en_priv *priv, int qpn,
689			       unsigned char *new_mac, unsigned char *prev_mac)
690{
691	struct mlx4_en_dev *mdev = priv->mdev;
692	struct mlx4_dev *dev = mdev->dev;
693	int err = 0;
694	u64 new_mac_u64 = mlx4_mac_to_u64(new_mac);
695
696	if (dev->caps.steering_mode != MLX4_STEERING_MODE_A0) {
697		struct hlist_head *bucket;
698		unsigned int mac_hash;
699		struct mlx4_mac_entry *entry;
700		struct hlist_node *tmp;
701		u64 prev_mac_u64 = mlx4_mac_to_u64(prev_mac);
702
703		bucket = &priv->mac_hash[prev_mac[MLX4_EN_MAC_HASH_IDX]];
704		hlist_for_each_entry_safe(entry, tmp, bucket, hlist) {
705			if (ether_addr_equal_64bits(entry->mac, prev_mac)) {
706				mlx4_en_uc_steer_release(priv, entry->mac,
707							 qpn, entry->reg_id);
708				mlx4_unregister_mac(dev, priv->port,
709						    prev_mac_u64);
710				hlist_del_rcu(&entry->hlist);
711				synchronize_rcu();
712				memcpy(entry->mac, new_mac, ETH_ALEN);
713				entry->reg_id = 0;
714				mac_hash = new_mac[MLX4_EN_MAC_HASH_IDX];
715				hlist_add_head_rcu(&entry->hlist,
716						   &priv->mac_hash[mac_hash]);
717				mlx4_register_mac(dev, priv->port, new_mac_u64);
718				err = mlx4_en_uc_steer_add(priv, new_mac,
719							   &qpn,
720							   &entry->reg_id);
721				if (err)
722					return err;
723				if (priv->tunnel_reg_id) {
724					mlx4_flow_detach(priv->mdev->dev, priv->tunnel_reg_id);
725					priv->tunnel_reg_id = 0;
726				}
727				err = mlx4_en_tunnel_steer_add(priv, new_mac, qpn,
728							       &priv->tunnel_reg_id);
729				return err;
730			}
731		}
732		return -EINVAL;
733	}
734
735	return __mlx4_replace_mac(dev, priv->port, qpn, new_mac_u64);
736}
737
738static int mlx4_en_do_set_mac(struct mlx4_en_priv *priv,
739			      unsigned char new_mac[ETH_ALEN + 2])
740{
741	int err = 0;
742
743	if (priv->port_up) {
744		/* Remove old MAC and insert the new one */
745		err = mlx4_en_replace_mac(priv, priv->base_qpn,
746					  new_mac, priv->current_mac);
747		if (err)
748			en_err(priv, "Failed changing HW MAC address\n");
749	} else
		en_dbg(HW, priv, "Port is down while registering MAC, exiting...\n");
751
752	if (!err)
753		memcpy(priv->current_mac, new_mac, sizeof(priv->current_mac));
754
755	return err;
756}
757
758static int mlx4_en_set_mac(struct net_device *dev, void *addr)
759{
760	struct mlx4_en_priv *priv = netdev_priv(dev);
761	struct mlx4_en_dev *mdev = priv->mdev;
762	struct sockaddr *saddr = addr;
763	unsigned char new_mac[ETH_ALEN + 2];
764	int err;
765
766	if (!is_valid_ether_addr(saddr->sa_data))
767		return -EADDRNOTAVAIL;
768
769	mutex_lock(&mdev->state_lock);
770	memcpy(new_mac, saddr->sa_data, ETH_ALEN);
771	err = mlx4_en_do_set_mac(priv, new_mac);
772	if (!err)
773		memcpy(dev->dev_addr, saddr->sa_data, ETH_ALEN);
774	mutex_unlock(&mdev->state_lock);
775
776	return err;
777}
778
779static void mlx4_en_clear_list(struct net_device *dev)
780{
781	struct mlx4_en_priv *priv = netdev_priv(dev);
782	struct mlx4_en_mc_list *tmp, *mc_to_del;
783
784	list_for_each_entry_safe(mc_to_del, tmp, &priv->mc_list, list) {
785		list_del(&mc_to_del->list);
786		kfree(mc_to_del);
787	}
788}
789
790static void mlx4_en_cache_mclist(struct net_device *dev)
791{
792	struct mlx4_en_priv *priv = netdev_priv(dev);
793	struct netdev_hw_addr *ha;
794	struct mlx4_en_mc_list *tmp;
795
796	mlx4_en_clear_list(dev);
797	netdev_for_each_mc_addr(ha, dev) {
798		tmp = kzalloc(sizeof(struct mlx4_en_mc_list), GFP_ATOMIC);
799		if (!tmp) {
800			mlx4_en_clear_list(dev);
801			return;
802		}
803		memcpy(tmp->addr, ha->addr, ETH_ALEN);
804		list_add_tail(&tmp->list, &priv->mc_list);
805	}
806}
807
808static void update_mclist_flags(struct mlx4_en_priv *priv,
809				struct list_head *dst,
810				struct list_head *src)
811{
812	struct mlx4_en_mc_list *dst_tmp, *src_tmp, *new_mc;
813	bool found;
814
	/* Find all the entries that should be removed from dst;
	 * these are the entries that are not found in src.
	 */
818	list_for_each_entry(dst_tmp, dst, list) {
819		found = false;
820		list_for_each_entry(src_tmp, src, list) {
821			if (ether_addr_equal(dst_tmp->addr, src_tmp->addr)) {
822				found = true;
823				break;
824			}
825		}
826		if (!found)
827			dst_tmp->action = MCLIST_REM;
828	}
829
	/* Add entries that exist in src but not in dst,
	 * and mark them as needing to be added.
	 */
833	list_for_each_entry(src_tmp, src, list) {
834		found = false;
835		list_for_each_entry(dst_tmp, dst, list) {
836			if (ether_addr_equal(dst_tmp->addr, src_tmp->addr)) {
837				dst_tmp->action = MCLIST_NONE;
838				found = true;
839				break;
840			}
841		}
842		if (!found) {
843			new_mc = kmemdup(src_tmp,
844					 sizeof(struct mlx4_en_mc_list),
845					 GFP_KERNEL);
846			if (!new_mc)
847				return;
848
849			new_mc->action = MCLIST_ADD;
850			list_add_tail(&new_mc->list, dst);
851		}
852	}
853}
854
855static void mlx4_en_set_rx_mode(struct net_device *dev)
856{
857	struct mlx4_en_priv *priv = netdev_priv(dev);
858
859	if (!priv->port_up)
860		return;
861
862	queue_work(priv->mdev->workqueue, &priv->rx_mode_task);
863}
864
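/* Enter promiscuous mode: program the appropriate promiscuous rules for
 * the current steering mode and disable the port multicast filter.
 */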
865static void mlx4_en_set_promisc_mode(struct mlx4_en_priv *priv,
866				     struct mlx4_en_dev *mdev)
867{
868	int err = 0;
869
870	if (!(priv->flags & MLX4_EN_FLAG_PROMISC)) {
871		if (netif_msg_rx_status(priv))
872			en_warn(priv, "Entering promiscuous mode\n");
873		priv->flags |= MLX4_EN_FLAG_PROMISC;
874
		/* Enable promiscuous mode */
876		switch (mdev->dev->caps.steering_mode) {
877		case MLX4_STEERING_MODE_DEVICE_MANAGED:
878			err = mlx4_flow_steer_promisc_add(mdev->dev,
879							  priv->port,
880							  priv->base_qpn,
881							  MLX4_FS_ALL_DEFAULT);
882			if (err)
883				en_err(priv, "Failed enabling promiscuous mode\n");
884			priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
885			break;
886
887		case MLX4_STEERING_MODE_B0:
888			err = mlx4_unicast_promisc_add(mdev->dev,
889						       priv->base_qpn,
890						       priv->port);
891			if (err)
892				en_err(priv, "Failed enabling unicast promiscuous mode\n");
893
894			/* Add the default qp number as multicast
895			 * promisc
896			 */
897			if (!(priv->flags & MLX4_EN_FLAG_MC_PROMISC)) {
898				err = mlx4_multicast_promisc_add(mdev->dev,
899								 priv->base_qpn,
900								 priv->port);
901				if (err)
902					en_err(priv, "Failed enabling multicast promiscuous mode\n");
903				priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
904			}
905			break;
906
907		case MLX4_STEERING_MODE_A0:
908			err = mlx4_SET_PORT_qpn_calc(mdev->dev,
909						     priv->port,
910						     priv->base_qpn,
911						     1);
912			if (err)
913				en_err(priv, "Failed enabling promiscuous mode\n");
914			break;
915		}
916
917		/* Disable port multicast filter (unconditionally) */
918		err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
919					  0, MLX4_MCAST_DISABLE);
920		if (err)
921			en_err(priv, "Failed disabling multicast filter\n");
922	}
923}
924
925static void mlx4_en_clear_promisc_mode(struct mlx4_en_priv *priv,
926				       struct mlx4_en_dev *mdev)
927{
928	int err = 0;
929
930	if (netif_msg_rx_status(priv))
931		en_warn(priv, "Leaving promiscuous mode\n");
932	priv->flags &= ~MLX4_EN_FLAG_PROMISC;
933
	/* Disable promiscuous mode */
935	switch (mdev->dev->caps.steering_mode) {
936	case MLX4_STEERING_MODE_DEVICE_MANAGED:
937		err = mlx4_flow_steer_promisc_remove(mdev->dev,
938						     priv->port,
939						     MLX4_FS_ALL_DEFAULT);
940		if (err)
941			en_err(priv, "Failed disabling promiscuous mode\n");
942		priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
943		break;
944
945	case MLX4_STEERING_MODE_B0:
946		err = mlx4_unicast_promisc_remove(mdev->dev,
947						  priv->base_qpn,
948						  priv->port);
949		if (err)
950			en_err(priv, "Failed disabling unicast promiscuous mode\n");
951		/* Disable Multicast promisc */
952		if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) {
953			err = mlx4_multicast_promisc_remove(mdev->dev,
954							    priv->base_qpn,
955							    priv->port);
956			if (err)
957				en_err(priv, "Failed disabling multicast promiscuous mode\n");
958			priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
959		}
960		break;
961
962	case MLX4_STEERING_MODE_A0:
963		err = mlx4_SET_PORT_qpn_calc(mdev->dev,
964					     priv->port,
965					     priv->base_qpn, 0);
966		if (err)
967			en_err(priv, "Failed disabling promiscuous mode\n");
968		break;
969	}
970}
971
972static void mlx4_en_do_multicast(struct mlx4_en_priv *priv,
973				 struct net_device *dev,
974				 struct mlx4_en_dev *mdev)
975{
976	struct mlx4_en_mc_list *mclist, *tmp;
977	u64 mcast_addr = 0;
978	u8 mc_list[16] = {0};
979	int err = 0;
980
981	/* Enable/disable the multicast filter according to IFF_ALLMULTI */
982	if (dev->flags & IFF_ALLMULTI) {
983		err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
984					  0, MLX4_MCAST_DISABLE);
985		if (err)
986			en_err(priv, "Failed disabling multicast filter\n");
987
988		/* Add the default qp number as multicast promisc */
989		if (!(priv->flags & MLX4_EN_FLAG_MC_PROMISC)) {
990			switch (mdev->dev->caps.steering_mode) {
991			case MLX4_STEERING_MODE_DEVICE_MANAGED:
992				err = mlx4_flow_steer_promisc_add(mdev->dev,
993								  priv->port,
994								  priv->base_qpn,
995								  MLX4_FS_MC_DEFAULT);
996				break;
997
998			case MLX4_STEERING_MODE_B0:
999				err = mlx4_multicast_promisc_add(mdev->dev,
1000								 priv->base_qpn,
1001								 priv->port);
1002				break;
1003
1004			case MLX4_STEERING_MODE_A0:
1005				break;
1006			}
1007			if (err)
1008				en_err(priv, "Failed entering multicast promisc mode\n");
1009			priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
1010		}
1011	} else {
1012		/* Disable Multicast promisc */
1013		if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) {
1014			switch (mdev->dev->caps.steering_mode) {
1015			case MLX4_STEERING_MODE_DEVICE_MANAGED:
1016				err = mlx4_flow_steer_promisc_remove(mdev->dev,
1017								     priv->port,
1018								     MLX4_FS_MC_DEFAULT);
1019				break;
1020
1021			case MLX4_STEERING_MODE_B0:
1022				err = mlx4_multicast_promisc_remove(mdev->dev,
1023								    priv->base_qpn,
1024								    priv->port);
1025				break;
1026
1027			case MLX4_STEERING_MODE_A0:
1028				break;
1029			}
1030			if (err)
1031				en_err(priv, "Failed disabling multicast promiscuous mode\n");
1032			priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
1033		}
1034
1035		err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
1036					  0, MLX4_MCAST_DISABLE);
1037		if (err)
1038			en_err(priv, "Failed disabling multicast filter\n");
1039
1040		/* Flush mcast filter and init it with broadcast address */
1041		mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, ETH_BCAST,
1042				    1, MLX4_MCAST_CONFIG);
1043
		/* Update the multicast list - we cache all addresses so they
		 * won't change while the HW is updated holding the command
		 * semaphore */
1046		netif_addr_lock_bh(dev);
1047		mlx4_en_cache_mclist(dev);
1048		netif_addr_unlock_bh(dev);
1049		list_for_each_entry(mclist, &priv->mc_list, list) {
1050			mcast_addr = mlx4_mac_to_u64(mclist->addr);
1051			mlx4_SET_MCAST_FLTR(mdev->dev, priv->port,
1052					    mcast_addr, 0, MLX4_MCAST_CONFIG);
1053		}
1054		err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
1055					  0, MLX4_MCAST_ENABLE);
1056		if (err)
1057			en_err(priv, "Failed enabling multicast filter\n");
1058
1059		update_mclist_flags(priv, &priv->curr_list, &priv->mc_list);
1060		list_for_each_entry_safe(mclist, tmp, &priv->curr_list, list) {
1061			if (mclist->action == MCLIST_REM) {
1062				/* detach this address and delete from list */
1063				memcpy(&mc_list[10], mclist->addr, ETH_ALEN);
1064				mc_list[5] = priv->port;
1065				err = mlx4_multicast_detach(mdev->dev,
1066							    &priv->rss_map.indir_qp,
1067							    mc_list,
1068							    MLX4_PROT_ETH,
1069							    mclist->reg_id);
1070				if (err)
					en_err(priv, "Failed to detach multicast address\n");
1072
1073				if (mclist->tunnel_reg_id) {
1074					err = mlx4_flow_detach(priv->mdev->dev, mclist->tunnel_reg_id);
1075					if (err)
1076						en_err(priv, "Failed to detach multicast address\n");
1077				}
1078
1079				/* remove from list */
1080				list_del(&mclist->list);
1081				kfree(mclist);
1082			} else if (mclist->action == MCLIST_ADD) {
1083				/* attach the address */
1084				memcpy(&mc_list[10], mclist->addr, ETH_ALEN);
1085				/* needed for B0 steering support */
1086				mc_list[5] = priv->port;
1087				err = mlx4_multicast_attach(mdev->dev,
1088							    &priv->rss_map.indir_qp,
1089							    mc_list,
1090							    priv->port, 0,
1091							    MLX4_PROT_ETH,
1092							    &mclist->reg_id);
1093				if (err)
					en_err(priv, "Failed to attach multicast address\n");
1095
1096				err = mlx4_en_tunnel_steer_add(priv, &mc_list[10], priv->base_qpn,
1097							       &mclist->tunnel_reg_id);
1098				if (err)
1099					en_err(priv, "Failed to attach multicast address\n");
1100			}
1101		}
1102	}
1103}
1104
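/* Synchronize the device unicast MAC table with the netdev uc address
 * list: release entries that were removed from the list and add new ones.
 * If adding a MAC fails, fall back to forced promiscuous mode.
 */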
1105static void mlx4_en_do_uc_filter(struct mlx4_en_priv *priv,
1106				 struct net_device *dev,
1107				 struct mlx4_en_dev *mdev)
1108{
1109	struct netdev_hw_addr *ha;
1110	struct mlx4_mac_entry *entry;
1111	struct hlist_node *tmp;
1112	bool found;
1113	u64 mac;
1114	int err = 0;
1115	struct hlist_head *bucket;
1116	unsigned int i;
1117	int removed = 0;
1118	u32 prev_flags;
1119
1120	/* Note that we do not need to protect our mac_hash traversal with rcu,
1121	 * since all modification code is protected by mdev->state_lock
1122	 */
1123
1124	/* find what to remove */
1125	for (i = 0; i < MLX4_EN_MAC_HASH_SIZE; ++i) {
1126		bucket = &priv->mac_hash[i];
1127		hlist_for_each_entry_safe(entry, tmp, bucket, hlist) {
1128			found = false;
1129			netdev_for_each_uc_addr(ha, dev) {
1130				if (ether_addr_equal_64bits(entry->mac,
1131							    ha->addr)) {
1132					found = true;
1133					break;
1134				}
1135			}
1136
			/* The port's own MAC address is not in the uc list;
			 * never remove it
			 */
1138			if (ether_addr_equal_64bits(entry->mac,
1139						    priv->current_mac))
1140				found = true;
1141
1142			if (!found) {
1143				mac = mlx4_mac_to_u64(entry->mac);
1144				mlx4_en_uc_steer_release(priv, entry->mac,
1145							 priv->base_qpn,
1146							 entry->reg_id);
1147				mlx4_unregister_mac(mdev->dev, priv->port, mac);
1148
1149				hlist_del_rcu(&entry->hlist);
1150				kfree_rcu(entry, rcu);
1151				en_dbg(DRV, priv, "Removed MAC %pM on port:%d\n",
1152				       entry->mac, priv->port);
1153				++removed;
1154			}
1155		}
1156	}
1157
1158	/* if we didn't remove anything, there is no use in trying to add
1159	 * again once we are in a forced promisc mode state
1160	 */
1161	if ((priv->flags & MLX4_EN_FLAG_FORCE_PROMISC) && 0 == removed)
1162		return;
1163
1164	prev_flags = priv->flags;
1165	priv->flags &= ~MLX4_EN_FLAG_FORCE_PROMISC;
1166
1167	/* find what to add */
1168	netdev_for_each_uc_addr(ha, dev) {
1169		found = false;
1170		bucket = &priv->mac_hash[ha->addr[MLX4_EN_MAC_HASH_IDX]];
1171		hlist_for_each_entry(entry, bucket, hlist) {
1172			if (ether_addr_equal_64bits(entry->mac, ha->addr)) {
1173				found = true;
1174				break;
1175			}
1176		}
1177
1178		if (!found) {
1179			entry = kmalloc(sizeof(*entry), GFP_KERNEL);
1180			if (!entry) {
1181				en_err(priv, "Failed adding MAC %pM on port:%d (out of memory)\n",
1182				       ha->addr, priv->port);
1183				priv->flags |= MLX4_EN_FLAG_FORCE_PROMISC;
1184				break;
1185			}
1186			mac = mlx4_mac_to_u64(ha->addr);
1187			memcpy(entry->mac, ha->addr, ETH_ALEN);
1188			err = mlx4_register_mac(mdev->dev, priv->port, mac);
1189			if (err < 0) {
1190				en_err(priv, "Failed registering MAC %pM on port %d: %d\n",
1191				       ha->addr, priv->port, err);
1192				kfree(entry);
1193				priv->flags |= MLX4_EN_FLAG_FORCE_PROMISC;
1194				break;
1195			}
1196			err = mlx4_en_uc_steer_add(priv, ha->addr,
1197						   &priv->base_qpn,
1198						   &entry->reg_id);
1199			if (err) {
1200				en_err(priv, "Failed adding MAC %pM on port %d: %d\n",
1201				       ha->addr, priv->port, err);
1202				mlx4_unregister_mac(mdev->dev, priv->port, mac);
1203				kfree(entry);
1204				priv->flags |= MLX4_EN_FLAG_FORCE_PROMISC;
1205				break;
1206			} else {
1207				unsigned int mac_hash;
1208				en_dbg(DRV, priv, "Added MAC %pM on port:%d\n",
1209				       ha->addr, priv->port);
1210				mac_hash = ha->addr[MLX4_EN_MAC_HASH_IDX];
1211				bucket = &priv->mac_hash[mac_hash];
1212				hlist_add_head_rcu(&entry->hlist, bucket);
1213			}
1214		}
1215	}
1216
1217	if (priv->flags & MLX4_EN_FLAG_FORCE_PROMISC) {
1218		en_warn(priv, "Forcing promiscuous mode on port:%d\n",
1219			priv->port);
1220	} else if (prev_flags & MLX4_EN_FLAG_FORCE_PROMISC) {
1221		en_warn(priv, "Stop forcing promiscuous mode on port:%d\n",
1222			priv->port);
1223	}
1224}
1225
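/* Deferred rx_mode work: refresh the carrier state and reprogram unicast,
 * promiscuous and multicast filtering under mdev->state_lock.
 */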
1226static void mlx4_en_do_set_rx_mode(struct work_struct *work)
1227{
1228	struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
1229						 rx_mode_task);
1230	struct mlx4_en_dev *mdev = priv->mdev;
1231	struct net_device *dev = priv->dev;
1232
1233	mutex_lock(&mdev->state_lock);
1234	if (!mdev->device_up) {
1235		en_dbg(HW, priv, "Card is not up, ignoring rx mode change.\n");
1236		goto out;
1237	}
1238	if (!priv->port_up) {
1239		en_dbg(HW, priv, "Port is down, ignoring rx mode change.\n");
1240		goto out;
1241	}
1242
1243	if (!netif_carrier_ok(dev)) {
1244		if (!mlx4_en_QUERY_PORT(mdev, priv->port)) {
1245			if (priv->port_state.link_state) {
1246				priv->last_link_state = MLX4_DEV_EVENT_PORT_UP;
1247				netif_carrier_on(dev);
1248				en_dbg(LINK, priv, "Link Up\n");
1249			}
1250		}
1251	}
1252
1253	if (dev->priv_flags & IFF_UNICAST_FLT)
1254		mlx4_en_do_uc_filter(priv, dev, mdev);
1255
	/* Promiscuous mode: disable all filters */
1257	if ((dev->flags & IFF_PROMISC) ||
1258	    (priv->flags & MLX4_EN_FLAG_FORCE_PROMISC)) {
1259		mlx4_en_set_promisc_mode(priv, mdev);
1260		goto out;
1261	}
1262
1263	/* Not in promiscuous mode */
1264	if (priv->flags & MLX4_EN_FLAG_PROMISC)
1265		mlx4_en_clear_promisc_mode(priv, mdev);
1266
1267	mlx4_en_do_multicast(priv, dev, mdev);
1268out:
1269	mutex_unlock(&mdev->state_lock);
1270}
1271
1272#ifdef CONFIG_NET_POLL_CONTROLLER
1273static void mlx4_en_netpoll(struct net_device *dev)
1274{
1275	struct mlx4_en_priv *priv = netdev_priv(dev);
1276	struct mlx4_en_cq *cq;
1277	int i;
1278
1279	for (i = 0; i < priv->rx_ring_num; i++) {
1280		cq = priv->rx_cq[i];
1281		napi_schedule(&cq->napi);
1282	}
1283}
1284#endif
1285
1286static void mlx4_en_tx_timeout(struct net_device *dev)
1287{
1288	struct mlx4_en_priv *priv = netdev_priv(dev);
1289	struct mlx4_en_dev *mdev = priv->mdev;
1290	int i;
1291
1292	if (netif_msg_timer(priv))
1293		en_warn(priv, "Tx timeout called on port:%d\n", priv->port);
1294
1295	for (i = 0; i < priv->tx_ring_num; i++) {
1296		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, i)))
1297			continue;
1298		en_warn(priv, "TX timeout on queue: %d, QP: 0x%x, CQ: 0x%x, Cons: 0x%x, Prod: 0x%x\n",
1299			i, priv->tx_ring[i]->qpn, priv->tx_ring[i]->cqn,
1300			priv->tx_ring[i]->cons, priv->tx_ring[i]->prod);
1301	}
1302
1303	priv->port_stats.tx_timeout++;
1304	en_dbg(DRV, priv, "Scheduling watchdog\n");
1305	queue_work(mdev->workqueue, &priv->watchdog_task);
1306}
1307
1308
1309static struct net_device_stats *mlx4_en_get_stats(struct net_device *dev)
1310{
1311	struct mlx4_en_priv *priv = netdev_priv(dev);
1312
1313	spin_lock_bh(&priv->stats_lock);
1314	memcpy(&priv->ret_stats, &priv->stats, sizeof(priv->stats));
1315	spin_unlock_bh(&priv->stats_lock);
1316
1317	return &priv->ret_stats;
1318}
1319
1320static void mlx4_en_set_default_moderation(struct mlx4_en_priv *priv)
1321{
1322	struct mlx4_en_cq *cq;
1323	int i;
1324
1325	/* If we haven't received a specific coalescing setting
1326	 * (module param), we set the moderation parameters as follows:
1327	 * - moder_cnt is set to the number of mtu sized packets to
1328	 *   satisfy our coalescing target.
1329	 * - moder_time is set to a fixed value.
1330	 */
1331	priv->rx_frames = MLX4_EN_RX_COAL_TARGET;
1332	priv->rx_usecs = MLX4_EN_RX_COAL_TIME;
1333	priv->tx_frames = MLX4_EN_TX_COAL_PKTS;
1334	priv->tx_usecs = MLX4_EN_TX_COAL_TIME;
	en_dbg(INTR, priv, "Default coalescing params for mtu:%d - rx_frames:%d rx_usecs:%d\n",
1336	       priv->dev->mtu, priv->rx_frames, priv->rx_usecs);
1337
1338	/* Setup cq moderation params */
1339	for (i = 0; i < priv->rx_ring_num; i++) {
1340		cq = priv->rx_cq[i];
1341		cq->moder_cnt = priv->rx_frames;
1342		cq->moder_time = priv->rx_usecs;
1343		priv->last_moder_time[i] = MLX4_EN_AUTO_CONF;
1344		priv->last_moder_packets[i] = 0;
1345		priv->last_moder_bytes[i] = 0;
1346	}
1347
1348	for (i = 0; i < priv->tx_ring_num; i++) {
1349		cq = priv->tx_cq[i];
1350		cq->moder_cnt = priv->tx_frames;
1351		cq->moder_time = priv->tx_usecs;
1352	}
1353
1354	/* Reset auto-moderation params */
1355	priv->pkt_rate_low = MLX4_EN_RX_RATE_LOW;
1356	priv->rx_usecs_low = MLX4_EN_RX_COAL_TIME_LOW;
1357	priv->pkt_rate_high = MLX4_EN_RX_RATE_HIGH;
1358	priv->rx_usecs_high = MLX4_EN_RX_COAL_TIME_HIGH;
1359	priv->sample_interval = MLX4_EN_SAMPLE_INTERVAL;
1360	priv->adaptive_rx_coal = 1;
1361	priv->last_moder_jiffies = 0;
1362	priv->last_moder_tx_packets = 0;
1363}
1364
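/* Adjust the RX interrupt moderation time of each ring based on the packet
 * rate and average packet size observed since the last sampling interval.
 */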
1365static void mlx4_en_auto_moderation(struct mlx4_en_priv *priv)
1366{
1367	unsigned long period = (unsigned long) (jiffies - priv->last_moder_jiffies);
1368	struct mlx4_en_cq *cq;
1369	unsigned long packets;
1370	unsigned long rate;
1371	unsigned long avg_pkt_size;
1372	unsigned long rx_packets;
1373	unsigned long rx_bytes;
1374	unsigned long rx_pkt_diff;
1375	int moder_time;
1376	int ring, err;
1377
1378	if (!priv->adaptive_rx_coal || period < priv->sample_interval * HZ)
1379		return;
1380
1381	for (ring = 0; ring < priv->rx_ring_num; ring++) {
1382		spin_lock_bh(&priv->stats_lock);
1383		rx_packets = priv->rx_ring[ring]->packets;
1384		rx_bytes = priv->rx_ring[ring]->bytes;
1385		spin_unlock_bh(&priv->stats_lock);
1386
1387		rx_pkt_diff = ((unsigned long) (rx_packets -
1388				priv->last_moder_packets[ring]));
1389		packets = rx_pkt_diff;
1390		rate = packets * HZ / period;
1391		avg_pkt_size = packets ? ((unsigned long) (rx_bytes -
1392				priv->last_moder_bytes[ring])) / packets : 0;
1393
		/* Apply auto-moderation only when the packet rate
		 * is high enough for it to matter */
1396		if (rate > (MLX4_EN_RX_RATE_THRESH / priv->rx_ring_num) &&
1397		    avg_pkt_size > MLX4_EN_AVG_PKT_SMALL) {
1398			if (rate < priv->pkt_rate_low)
1399				moder_time = priv->rx_usecs_low;
1400			else if (rate > priv->pkt_rate_high)
1401				moder_time = priv->rx_usecs_high;
1402			else
1403				moder_time = (rate - priv->pkt_rate_low) *
1404					(priv->rx_usecs_high - priv->rx_usecs_low) /
1405					(priv->pkt_rate_high - priv->pkt_rate_low) +
1406					priv->rx_usecs_low;
1407		} else {
1408			moder_time = priv->rx_usecs_low;
1409		}
1410
1411		if (moder_time != priv->last_moder_time[ring]) {
1412			priv->last_moder_time[ring] = moder_time;
1413			cq = priv->rx_cq[ring];
1414			cq->moder_time = moder_time;
1415			cq->moder_cnt = priv->rx_frames;
1416			err = mlx4_en_set_cq_moder(priv, cq);
1417			if (err)
1418				en_err(priv, "Failed modifying moderation for cq:%d\n",
1419				       ring);
1420		}
1421		priv->last_moder_packets[ring] = rx_packets;
1422		priv->last_moder_bytes[ring] = rx_bytes;
1423	}
1424
1425	priv->last_moder_jiffies = jiffies;
1426}
1427
1428static void mlx4_en_do_get_stats(struct work_struct *work)
1429{
1430	struct delayed_work *delay = to_delayed_work(work);
1431	struct mlx4_en_priv *priv = container_of(delay, struct mlx4_en_priv,
1432						 stats_task);
1433	struct mlx4_en_dev *mdev = priv->mdev;
1434	int err;
1435
1436	mutex_lock(&mdev->state_lock);
1437	if (mdev->device_up) {
1438		if (priv->port_up) {
1439			err = mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 0);
1440			if (err)
1441				en_dbg(HW, priv, "Could not update stats\n");
1442
1443			mlx4_en_auto_moderation(priv);
1444		}
1445
1446		queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY);
1447	}
1448	if (mdev->mac_removed[MLX4_MAX_PORTS + 1 - priv->port]) {
1449		mlx4_en_do_set_mac(priv, priv->current_mac);
1450		mdev->mac_removed[MLX4_MAX_PORTS + 1 - priv->port] = 0;
1451	}
1452	mutex_unlock(&mdev->state_lock);
1453}
1454
/* mlx4_en_service_task - Run tasks that need to be done periodically:
 * PTP clock overflow check and recovery from out-of-memory conditions
 * in the RX rings.
 */
1458static void mlx4_en_service_task(struct work_struct *work)
1459{
1460	struct delayed_work *delay = to_delayed_work(work);
1461	struct mlx4_en_priv *priv = container_of(delay, struct mlx4_en_priv,
1462						 service_task);
1463	struct mlx4_en_dev *mdev = priv->mdev;
1464
1465	mutex_lock(&mdev->state_lock);
1466	if (mdev->device_up) {
1467		if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS)
1468			mlx4_en_ptp_overflow_check(mdev);
1469
1470		mlx4_en_recover_from_oom(priv);
1471		queue_delayed_work(mdev->workqueue, &priv->service_task,
1472				   SERVICE_TASK_DELAY);
1473	}
1474	mutex_unlock(&mdev->state_lock);
1475}
1476
1477static void mlx4_en_linkstate(struct work_struct *work)
1478{
1479	struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
1480						 linkstate_task);
1481	struct mlx4_en_dev *mdev = priv->mdev;
1482	int linkstate = priv->link_state;
1483
1484	mutex_lock(&mdev->state_lock);
	/* If the observable port state changed, set the carrier state
	 * and report it to the system log */
1487	if (priv->last_link_state != linkstate) {
1488		if (linkstate == MLX4_DEV_EVENT_PORT_DOWN) {
1489			en_info(priv, "Link Down\n");
1490			netif_carrier_off(priv->dev);
1491		} else {
1492			en_info(priv, "Link Up\n");
1493			netif_carrier_on(priv->dev);
1494		}
1495	}
1496	priv->last_link_state = linkstate;
1497	mutex_unlock(&mdev->state_lock);
1498}
1499
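/* Build the IRQ affinity hint for an RX ring, spreading rings over the
 * CPUs while preferring those on the device's NUMA node.
 */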
1500static int mlx4_en_init_affinity_hint(struct mlx4_en_priv *priv, int ring_idx)
1501{
1502	struct mlx4_en_rx_ring *ring = priv->rx_ring[ring_idx];
1503	int numa_node = priv->mdev->dev->numa_node;
1504
1505	if (!zalloc_cpumask_var(&ring->affinity_mask, GFP_KERNEL))
1506		return -ENOMEM;
1507
1508	cpumask_set_cpu(cpumask_local_spread(ring_idx, numa_node),
1509			ring->affinity_mask);
1510	return 0;
1511}
1512
1513static void mlx4_en_free_affinity_hint(struct mlx4_en_priv *priv, int ring_idx)
1514{
1515	free_cpumask_var(priv->rx_ring[ring_idx]->affinity_mask);
1516}
1517
1518int mlx4_en_start_port(struct net_device *dev)
1519{
1520	struct mlx4_en_priv *priv = netdev_priv(dev);
1521	struct mlx4_en_dev *mdev = priv->mdev;
1522	struct mlx4_en_cq *cq;
1523	struct mlx4_en_tx_ring *tx_ring;
1524	int rx_index = 0;
1525	int tx_index = 0;
1526	int err = 0;
1527	int i;
1528	int j;
1529	u8 mc_list[16] = {0};
1530
1531	if (priv->port_up) {
1532		en_dbg(DRV, priv, "start port called while port already up\n");
1533		return 0;
1534	}
1535
1536	INIT_LIST_HEAD(&priv->mc_list);
1537	INIT_LIST_HEAD(&priv->curr_list);
1538	INIT_LIST_HEAD(&priv->ethtool_list);
1539	memset(&priv->ethtool_rules[0], 0,
1540	       sizeof(struct ethtool_flow_id) * MAX_NUM_OF_FS_RULES);
1541
1542	/* Calculate Rx buf size */
1543	dev->mtu = min(dev->mtu, priv->max_mtu);
1544	mlx4_en_calc_rx_buf(dev);
1545	en_dbg(DRV, priv, "Rx buf size:%d\n", priv->rx_skb_size);
1546
1547	/* Configure rx cq's and rings */
1548	err = mlx4_en_activate_rx_rings(priv);
1549	if (err) {
1550		en_err(priv, "Failed to activate RX rings\n");
1551		return err;
1552	}
1553	for (i = 0; i < priv->rx_ring_num; i++) {
1554		cq = priv->rx_cq[i];
1555
1556		mlx4_en_cq_init_lock(cq);
1557
1558		err = mlx4_en_init_affinity_hint(priv, i);
1559		if (err) {
1560			en_err(priv, "Failed preparing IRQ affinity hint\n");
1561			goto cq_err;
1562		}
1563
1564		err = mlx4_en_activate_cq(priv, cq, i);
1565		if (err) {
1566			en_err(priv, "Failed activating Rx CQ\n");
1567			mlx4_en_free_affinity_hint(priv, i);
1568			goto cq_err;
1569		}
1570
1571		for (j = 0; j < cq->size; j++) {
1572			struct mlx4_cqe *cqe = NULL;
1573
1574			cqe = mlx4_en_get_cqe(cq->buf, j, priv->cqe_size) +
1575			      priv->cqe_factor;
1576			cqe->owner_sr_opcode = MLX4_CQE_OWNER_MASK;
1577		}
1578
1579		err = mlx4_en_set_cq_moder(priv, cq);
1580		if (err) {
1581			en_err(priv, "Failed setting cq moderation parameters\n");
1582			mlx4_en_deactivate_cq(priv, cq);
1583			mlx4_en_free_affinity_hint(priv, i);
1584			goto cq_err;
1585		}
1586		mlx4_en_arm_cq(priv, cq);
1587		priv->rx_ring[i]->cqn = cq->mcq.cqn;
1588		++rx_index;
1589	}
1590
1591	/* Set qp number */
1592	en_dbg(DRV, priv, "Getting qp number for port %d\n", priv->port);
1593	err = mlx4_en_get_qp(priv);
1594	if (err) {
1595		en_err(priv, "Failed getting eth qp\n");
1596		goto cq_err;
1597	}
1598	mdev->mac_removed[priv->port] = 0;
1599
1600	err = mlx4_en_config_rss_steer(priv);
1601	if (err) {
1602		en_err(priv, "Failed configuring rss steering\n");
1603		goto mac_err;
1604	}
1605
1606	err = mlx4_en_create_drop_qp(priv);
1607	if (err)
1608		goto rss_err;
1609
1610	/* Configure tx cq's and rings */
1611	for (i = 0; i < priv->tx_ring_num; i++) {
1612		/* Configure cq */
1613		cq = priv->tx_cq[i];
1614		err = mlx4_en_activate_cq(priv, cq, i);
1615		if (err) {
1616			en_err(priv, "Failed allocating Tx CQ\n");
1617			goto tx_err;
1618		}
1619		err = mlx4_en_set_cq_moder(priv, cq);
1620		if (err) {
1621			en_err(priv, "Failed setting cq moderation parameters\n");
1622			mlx4_en_deactivate_cq(priv, cq);
1623			goto tx_err;
1624		}
1625		en_dbg(DRV, priv, "Resetting index of collapsed CQ:%d to -1\n", i);
1626		cq->buf->wqe_index = cpu_to_be16(0xffff);
1627
1628		/* Configure ring */
1629		tx_ring = priv->tx_ring[i];
1630		err = mlx4_en_activate_tx_ring(priv, tx_ring, cq->mcq.cqn,
1631			i / priv->num_tx_rings_p_up);
1632		if (err) {
1633			en_err(priv, "Failed allocating Tx ring\n");
1634			mlx4_en_deactivate_cq(priv, cq);
1635			goto tx_err;
1636		}
1637		tx_ring->tx_queue = netdev_get_tx_queue(dev, i);
1638
1639		/* Arm CQ for TX completions */
1640		mlx4_en_arm_cq(priv, cq);
1641
1642		/* Set initial ownership of all Tx TXBBs to SW (1) */
1643		for (j = 0; j < tx_ring->buf_size; j += STAMP_STRIDE)
1644			*((u32 *) (tx_ring->buf + j)) = 0xffffffff;
1645		++tx_index;
1646	}
1647
1648	/* Configure port */
1649	err = mlx4_SET_PORT_general(mdev->dev, priv->port,
1650				    priv->rx_skb_size + ETH_FCS_LEN,
1651				    priv->prof->tx_pause,
1652				    priv->prof->tx_ppp,
1653				    priv->prof->rx_pause,
1654				    priv->prof->rx_ppp);
1655	if (err) {
1656		en_err(priv, "Failed setting port general configurations for port %d, with error %d\n",
1657		       priv->port, err);
1658		goto tx_err;
1659	}
1660	/* Set default qp number */
1661	err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port, priv->base_qpn, 0);
1662	if (err) {
1663		en_err(priv, "Failed setting default qp numbers\n");
1664		goto tx_err;
1665	}
1666
1667	if (mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) {
1668		err = mlx4_SET_PORT_VXLAN(mdev->dev, priv->port, VXLAN_STEER_BY_OUTER_MAC, 1);
1669		if (err) {
1670			en_err(priv, "Failed setting port L2 tunnel configuration, err %d\n",
1671			       err);
1672			goto tx_err;
1673		}
1674	}
1675
1676	/* Init port */
1677	en_dbg(HW, priv, "Initializing port\n");
1678	err = mlx4_INIT_PORT(mdev->dev, priv->port);
1679	if (err) {
1680		en_err(priv, "Failed Initializing port\n");
1681		goto tx_err;
1682	}
1683
	/* Attach the RX QP to the broadcast address */
1685	eth_broadcast_addr(&mc_list[10]);
1686	mc_list[5] = priv->port; /* needed for B0 steering support */
1687	if (mlx4_multicast_attach(mdev->dev, &priv->rss_map.indir_qp, mc_list,
1688				  priv->port, 0, MLX4_PROT_ETH,
1689				  &priv->broadcast_id))
1690		mlx4_warn(mdev, "Failed Attaching Broadcast\n");
1691
1692	/* Must redo promiscuous mode setup. */
1693	priv->flags &= ~(MLX4_EN_FLAG_PROMISC | MLX4_EN_FLAG_MC_PROMISC);
1694
1695	/* Schedule multicast task to populate multicast list */
1696	queue_work(mdev->workqueue, &priv->rx_mode_task);
1697
1698#ifdef CONFIG_MLX4_EN_VXLAN
1699	if (priv->mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN)
1700		vxlan_get_rx_port(dev);
1701#endif
1702	priv->port_up = true;
1703	netif_tx_start_all_queues(dev);
1704	netif_device_attach(dev);
1705
1706	return 0;
1707
1708tx_err:
1709	while (tx_index--) {
1710		mlx4_en_deactivate_tx_ring(priv, priv->tx_ring[tx_index]);
1711		mlx4_en_deactivate_cq(priv, priv->tx_cq[tx_index]);
1712	}
1713	mlx4_en_destroy_drop_qp(priv);
1714rss_err:
1715	mlx4_en_release_rss_steer(priv);
1716mac_err:
1717	mlx4_en_put_qp(priv);
1718cq_err:
1719	while (rx_index--) {
1720		mlx4_en_deactivate_cq(priv, priv->rx_cq[rx_index]);
1721		mlx4_en_free_affinity_hint(priv, rx_index);
1722	}
1723	for (i = 0; i < priv->rx_ring_num; i++)
1724		mlx4_en_deactivate_rx_ring(priv, priv->rx_ring[i]);
1725
1726	return err; /* need to close devices */
1727}
1728
1729
1730void mlx4_en_stop_port(struct net_device *dev, int detach)
1731{
1732	struct mlx4_en_priv *priv = netdev_priv(dev);
1733	struct mlx4_en_dev *mdev = priv->mdev;
1734	struct mlx4_en_mc_list *mclist, *tmp;
1735	struct ethtool_flow_id *flow, *tmp_flow;
1736	int i;
1737	u8 mc_list[16] = {0};
1738
1739	if (!priv->port_up) {
1740		en_dbg(DRV, priv, "stop port called while port already down\n");
1741		return;
1742	}
1743
	/* Close port */
1745	mlx4_CLOSE_PORT(mdev->dev, priv->port);
1746
1747	/* Synchronize with tx routine */
1748	netif_tx_lock_bh(dev);
1749	if (detach)
1750		netif_device_detach(dev);
1751	netif_tx_stop_all_queues(dev);
1752	netif_tx_unlock_bh(dev);
1753
1754	netif_tx_disable(dev);
1755
1756	/* Set port as not active */
1757	priv->port_up = false;
1758
	/* Promiscuous mode */
1760	if (mdev->dev->caps.steering_mode ==
1761	    MLX4_STEERING_MODE_DEVICE_MANAGED) {
1762		priv->flags &= ~(MLX4_EN_FLAG_PROMISC |
1763				 MLX4_EN_FLAG_MC_PROMISC);
1764		mlx4_flow_steer_promisc_remove(mdev->dev,
1765					       priv->port,
1766					       MLX4_FS_ALL_DEFAULT);
1767		mlx4_flow_steer_promisc_remove(mdev->dev,
1768					       priv->port,
1769					       MLX4_FS_MC_DEFAULT);
1770	} else if (priv->flags & MLX4_EN_FLAG_PROMISC) {
1771		priv->flags &= ~MLX4_EN_FLAG_PROMISC;
1772
		/* Disable promiscuous mode */
1774		mlx4_unicast_promisc_remove(mdev->dev, priv->base_qpn,
1775					    priv->port);
1776
1777		/* Disable Multicast promisc */
1778		if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) {
1779			mlx4_multicast_promisc_remove(mdev->dev, priv->base_qpn,
1780						      priv->port);
1781			priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
1782		}
1783	}
1784
	/* Detach all multicast addresses */
1786	eth_broadcast_addr(&mc_list[10]);
1787	mc_list[5] = priv->port; /* needed for B0 steering support */
1788	mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp, mc_list,
1789			      MLX4_PROT_ETH, priv->broadcast_id);
1790	list_for_each_entry(mclist, &priv->curr_list, list) {
1791		memcpy(&mc_list[10], mclist->addr, ETH_ALEN);
1792		mc_list[5] = priv->port;
1793		mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp,
1794				      mc_list, MLX4_PROT_ETH, mclist->reg_id);
1795		if (mclist->tunnel_reg_id)
1796			mlx4_flow_detach(mdev->dev, mclist->tunnel_reg_id);
1797	}
1798	mlx4_en_clear_list(dev);
1799	list_for_each_entry_safe(mclist, tmp, &priv->curr_list, list) {
1800		list_del(&mclist->list);
1801		kfree(mclist);
1802	}
1803
1804	/* Flush multicast filter */
1805	mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0, 1, MLX4_MCAST_CONFIG);
1806
	/* Remove flow steering rules for the port */
1808	if (mdev->dev->caps.steering_mode ==
1809	    MLX4_STEERING_MODE_DEVICE_MANAGED) {
1810		ASSERT_RTNL();
1811		list_for_each_entry_safe(flow, tmp_flow,
1812					 &priv->ethtool_list, list) {
1813			mlx4_flow_detach(mdev->dev, flow->id);
1814			list_del(&flow->list);
1815		}
1816	}
1817
1818	mlx4_en_destroy_drop_qp(priv);
1819
1820	/* Free TX Rings */
1821	for (i = 0; i < priv->tx_ring_num; i++) {
1822		mlx4_en_deactivate_tx_ring(priv, priv->tx_ring[i]);
1823		mlx4_en_deactivate_cq(priv, priv->tx_cq[i]);
1824	}
1825	msleep(10);
1826
1827	for (i = 0; i < priv->tx_ring_num; i++)
1828		mlx4_en_free_tx_buf(dev, priv->tx_ring[i]);
1829
1830	/* Free RSS qps */
1831	mlx4_en_release_rss_steer(priv);
1832
	/* Unregister the MAC address for the port */
1834	mlx4_en_put_qp(priv);
1835	if (!(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_REASSIGN_MAC_EN))
1836		mdev->mac_removed[priv->port] = 1;
1837
1838	/* Free RX Rings */
1839	for (i = 0; i < priv->rx_ring_num; i++) {
1840		struct mlx4_en_cq *cq = priv->rx_cq[i];
1841
1842		local_bh_disable();
1843		while (!mlx4_en_cq_lock_napi(cq)) {
1844			pr_info("CQ %d locked\n", i);
1845			mdelay(1);
1846		}
1847		local_bh_enable();
1848
1849		napi_synchronize(&cq->napi);
1850		mlx4_en_deactivate_rx_ring(priv, priv->rx_ring[i]);
1851		mlx4_en_deactivate_cq(priv, cq);
1852
1853		mlx4_en_free_affinity_hint(priv, i);
1854	}
1855}
1856
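/* Watchdog work: recover from a TX timeout by restarting the port. */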
1857static void mlx4_en_restart(struct work_struct *work)
1858{
1859	struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
1860						 watchdog_task);
1861	struct mlx4_en_dev *mdev = priv->mdev;
1862	struct net_device *dev = priv->dev;
1863
1864	en_dbg(DRV, priv, "Watchdog task called for port %d\n", priv->port);
1865
1866	mutex_lock(&mdev->state_lock);
1867	if (priv->port_up) {
1868		mlx4_en_stop_port(dev, 1);
1869		if (mlx4_en_start_port(dev))
1870			en_err(priv, "Failed restarting port %d\n", priv->port);
1871	}
1872	mutex_unlock(&mdev->state_lock);
1873}
1874
1875static void mlx4_en_clear_stats(struct net_device *dev)
1876{
1877	struct mlx4_en_priv *priv = netdev_priv(dev);
1878	struct mlx4_en_dev *mdev = priv->mdev;
1879	int i;
1880
1881	if (mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 1))
1882		en_dbg(HW, priv, "Failed dumping statistics\n");
1883
1884	memset(&priv->stats, 0, sizeof(priv->stats));
1885	memset(&priv->pstats, 0, sizeof(priv->pstats));
1886	memset(&priv->pkstats, 0, sizeof(priv->pkstats));
1887	memset(&priv->port_stats, 0, sizeof(priv->port_stats));
1888	memset(&priv->rx_flowstats, 0, sizeof(priv->rx_flowstats));
1889	memset(&priv->tx_flowstats, 0, sizeof(priv->tx_flowstats));
1890	memset(&priv->rx_priority_flowstats, 0,
1891	       sizeof(priv->rx_priority_flowstats));
1892	memset(&priv->tx_priority_flowstats, 0,
1893	       sizeof(priv->tx_priority_flowstats));
1894
1895	for (i = 0; i < priv->tx_ring_num; i++) {
1896		priv->tx_ring[i]->bytes = 0;
1897		priv->tx_ring[i]->packets = 0;
1898		priv->tx_ring[i]->tx_csum = 0;
1899	}
1900	for (i = 0; i < priv->rx_ring_num; i++) {
1901		priv->rx_ring[i]->bytes = 0;
1902		priv->rx_ring[i]->packets = 0;
1903		priv->rx_ring[i]->csum_ok = 0;
1904		priv->rx_ring[i]->csum_none = 0;
1905		priv->rx_ring[i]->csum_complete = 0;
1906	}
1907}
1908
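/* ndo_open: clear statistics and bring the port up; fails with -EBUSY
 * if the underlying mlx4 device is down.
 */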
1909static int mlx4_en_open(struct net_device *dev)
1910{
1911	struct mlx4_en_priv *priv = netdev_priv(dev);
1912	struct mlx4_en_dev *mdev = priv->mdev;
1913	int err = 0;
1914
1915	mutex_lock(&mdev->state_lock);
1916
1917	if (!mdev->device_up) {
1918		en_err(priv, "Cannot open - device down/disabled\n");
1919		err = -EBUSY;
1920		goto out;
1921	}
1922
1923	/* Reset HW statistics and SW counters */
1924	mlx4_en_clear_stats(dev);
1925
1926	err = mlx4_en_start_port(dev);
1927	if (err)
1928		en_err(priv, "Failed starting port:%d\n", priv->port);
1929
1930out:
1931	mutex_unlock(&mdev->state_lock);
1932	return err;
1933}
1934
1935
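/* ndo_stop: take the port down and clear the carrier. */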
1936static int mlx4_en_close(struct net_device *dev)
1937{
1938	struct mlx4_en_priv *priv = netdev_priv(dev);
1939	struct mlx4_en_dev *mdev = priv->mdev;
1940
1941	en_dbg(IFDOWN, priv, "Close port called\n");
1942
1943	mutex_lock(&mdev->state_lock);
1944
1945	mlx4_en_stop_port(dev, 0);
1946	netif_carrier_off(dev);
1947
1948	mutex_unlock(&mdev->state_lock);
1949	return 0;
1950}
1951
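/* Free all TX/RX rings and their completion queues, and release the
 * RFS IRQ cpu_rmap if one was allocated.
 */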
1952void mlx4_en_free_resources(struct mlx4_en_priv *priv)
1953{
1954	int i;
1955
1956#ifdef CONFIG_RFS_ACCEL
1957	free_irq_cpu_rmap(priv->dev->rx_cpu_rmap);
1958	priv->dev->rx_cpu_rmap = NULL;
1959#endif
1960
1961	for (i = 0; i < priv->tx_ring_num; i++) {
1962		if (priv->tx_ring && priv->tx_ring[i])
1963			mlx4_en_destroy_tx_ring(priv, &priv->tx_ring[i]);
1964		if (priv->tx_cq && priv->tx_cq[i])
1965			mlx4_en_destroy_cq(priv, &priv->tx_cq[i]);
1966	}
1967
1968	for (i = 0; i < priv->rx_ring_num; i++) {
1969		if (priv->rx_ring[i])
1970			mlx4_en_destroy_rx_ring(priv, &priv->rx_ring[i],
1971				priv->prof->rx_ring_size, priv->stride);
1972		if (priv->rx_cq[i])
1973			mlx4_en_destroy_cq(priv, &priv->rx_cq[i]);
1974	}
1975
1976}
1977
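/* Allocate TX/RX rings and CQs according to the port profile, spreading
 * them over the NUMA nodes of the online CPUs. On failure, everything
 * that was created is destroyed and -ENOMEM is returned.
 */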
1978int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
1979{
1980	struct mlx4_en_port_profile *prof = priv->prof;
1981	int i;
1982	int node;
1983
1984	/* Create tx Rings */
1985	for (i = 0; i < priv->tx_ring_num; i++) {
1986		node = cpu_to_node(i % num_online_cpus());
1987		if (mlx4_en_create_cq(priv, &priv->tx_cq[i],
1988				      prof->tx_ring_size, i, TX, node))
1989			goto err;
1990
1991		if (mlx4_en_create_tx_ring(priv, &priv->tx_ring[i],
1992					   prof->tx_ring_size, TXBB_SIZE,
1993					   node, i))
1994			goto err;
1995	}
1996
1997	/* Create rx Rings */
1998	for (i = 0; i < priv->rx_ring_num; i++) {
1999		node = cpu_to_node(i % num_online_cpus());
2000		if (mlx4_en_create_cq(priv, &priv->rx_cq[i],
2001				      prof->rx_ring_size, i, RX, node))
2002			goto err;
2003
2004		if (mlx4_en_create_rx_ring(priv, &priv->rx_ring[i],
2005					   prof->rx_ring_size, priv->stride,
2006					   node))
2007			goto err;
2008	}
2009
2010#ifdef CONFIG_RFS_ACCEL
2011	if (priv->mdev->dev->caps.comp_pool) {
2012		priv->dev->rx_cpu_rmap = alloc_irq_cpu_rmap(priv->mdev->dev->caps.comp_pool);
2013		if (!priv->dev->rx_cpu_rmap)
2014			goto err;
2015	}
2016#endif
2017
2018	return 0;
2019
2020err:
2021	en_err(priv, "Failed to allocate NIC resources\n");
2022	for (i = 0; i < priv->rx_ring_num; i++) {
2023		if (priv->rx_ring[i])
2024			mlx4_en_destroy_rx_ring(priv, &priv->rx_ring[i],
2025						prof->rx_ring_size,
2026						priv->stride);
2027		if (priv->rx_cq[i])
2028			mlx4_en_destroy_cq(priv, &priv->rx_cq[i]);
2029	}
2030	for (i = 0; i < priv->tx_ring_num; i++) {
2031		if (priv->tx_ring[i])
2032			mlx4_en_destroy_tx_ring(priv, &priv->tx_ring[i]);
2033		if (priv->tx_cq[i])
2034			mlx4_en_destroy_cq(priv, &priv->tx_cq[i]);
2035	}
2036	return -ENOMEM;
2037}
2038
2039
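/* Tear down a port netdev: unregister it, flush pending work, detach it
 * from the mlx4_en device and free all of its resources.
 */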
2040void mlx4_en_destroy_netdev(struct net_device *dev)
2041{
2042	struct mlx4_en_priv *priv = netdev_priv(dev);
2043	struct mlx4_en_dev *mdev = priv->mdev;
2044
2045	en_dbg(DRV, priv, "Destroying netdev on port:%d\n", priv->port);
2046
2047	/* Unregister device - this will close the port if it was up */
2048	if (priv->registered)
2049		unregister_netdev(dev);
2050
2051	if (priv->allocated)
2052		mlx4_free_hwq_res(mdev->dev, &priv->res, MLX4_EN_PAGE_SIZE);
2053
2054	cancel_delayed_work(&priv->stats_task);
2055	cancel_delayed_work(&priv->service_task);
2056	/* flush any pending task for this netdev */
2057	flush_workqueue(mdev->workqueue);
2058
2059	/* Detach the netdev so tasks would not attempt to access it */
2060	mutex_lock(&mdev->state_lock);
2061	mdev->pndev[priv->port] = NULL;
2062	mdev->upper[priv->port] = NULL;
2063	mutex_unlock(&mdev->state_lock);
2064
2065	mlx4_en_free_resources(priv);
2066
2067	kfree(priv->tx_ring);
2068	kfree(priv->tx_cq);
2069
2070	free_netdev(dev);
2071}
2072
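/* ndo_change_mtu: validate the new MTU against the device limits and,
 * if the interface is running, restart the port to apply it.
 */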
2073static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu)
2074{
2075	struct mlx4_en_priv *priv = netdev_priv(dev);
2076	struct mlx4_en_dev *mdev = priv->mdev;
2077	int err = 0;
2078
2079	en_dbg(DRV, priv, "Change MTU called - current:%d new:%d\n",
2080		 dev->mtu, new_mtu);
2081
2082	if ((new_mtu < MLX4_EN_MIN_MTU) || (new_mtu > priv->max_mtu)) {
2083		en_err(priv, "Bad MTU size:%d.\n", new_mtu);
2084		return -EPERM;
2085	}
2086	dev->mtu = new_mtu;
2087
2088	if (netif_running(dev)) {
2089		mutex_lock(&mdev->state_lock);
2090		if (!mdev->device_up) {
2091			/* NIC is probably restarting - let watchdog task reset
2092			 * the port */
2093			en_dbg(DRV, priv, "Change MTU called with card down!?\n");
2094		} else {
2095			mlx4_en_stop_port(dev, 1);
2096			err = mlx4_en_start_port(dev);
2097			if (err) {
2098				en_err(priv, "Failed restarting port:%d\n",
2099					 priv->port);
2100				queue_work(mdev->workqueue, &priv->watchdog_task);
2101			}
2102		}
2103		mutex_unlock(&mdev->state_lock);
2104	}
2105	return 0;
2106}
2107
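/* SIOCSHWTSTAMP handler: validate the requested HW timestamping
 * configuration and apply it via mlx4_en_reset_config().
 */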
2108static int mlx4_en_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
2109{
2110	struct mlx4_en_priv *priv = netdev_priv(dev);
2111	struct mlx4_en_dev *mdev = priv->mdev;
2112	struct hwtstamp_config config;
2113
2114	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
2115		return -EFAULT;
2116
2117	/* reserved for future extensions */
2118	if (config.flags)
2119		return -EINVAL;
2120
2121	/* device doesn't support time stamping */
2122	if (!(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS))
2123		return -EINVAL;
2124
2125	/* TX HW timestamp */
2126	switch (config.tx_type) {
2127	case HWTSTAMP_TX_OFF:
2128	case HWTSTAMP_TX_ON:
2129		break;
2130	default:
2131		return -ERANGE;
2132	}
2133
2134	/* RX HW timestamp */
2135	switch (config.rx_filter) {
2136	case HWTSTAMP_FILTER_NONE:
2137		break;
2138	case HWTSTAMP_FILTER_ALL:
2139	case HWTSTAMP_FILTER_SOME:
2140	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
2141	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
2142	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
2143	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
2144	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
2145	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
2146	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
2147	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
2148	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
2149	case HWTSTAMP_FILTER_PTP_V2_EVENT:
2150	case HWTSTAMP_FILTER_PTP_V2_SYNC:
2151	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
2152		config.rx_filter = HWTSTAMP_FILTER_ALL;
2153		break;
2154	default:
2155		return -ERANGE;
2156	}
2157
2158	if (mlx4_en_reset_config(dev, config, dev->features)) {
2159		config.tx_type = HWTSTAMP_TX_OFF;
2160		config.rx_filter = HWTSTAMP_FILTER_NONE;
2161	}
2162
2163	return copy_to_user(ifr->ifr_data, &config,
2164			    sizeof(config)) ? -EFAULT : 0;
2165}
2166
2167static int mlx4_en_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
2168{
2169	struct mlx4_en_priv *priv = netdev_priv(dev);
2170
2171	return copy_to_user(ifr->ifr_data, &priv->hwtstamp_config,
2172			    sizeof(priv->hwtstamp_config)) ? -EFAULT : 0;
2173}
2174
2175static int mlx4_en_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
2176{
2177	switch (cmd) {
2178	case SIOCSHWTSTAMP:
2179		return mlx4_en_hwtstamp_set(dev, ifr);
2180	case SIOCGHWTSTAMP:
2181		return mlx4_en_hwtstamp_get(dev, ifr);
2182	default:
2183		return -EOPNOTSUPP;
2184	}
2185}
2186
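/* ndo_set_features: handle RX-FCS, RX-ALL, VLAN offload and loopback
 * changes; some of these require reconfiguring the port.
 */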
2187static int mlx4_en_set_features(struct net_device *netdev,
2188		netdev_features_t features)
2189{
2190	struct mlx4_en_priv *priv = netdev_priv(netdev);
2191	bool reset = false;
2192	int ret = 0;
2193
2194	if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_RXFCS)) {
2195		en_info(priv, "Turn %s RX-FCS\n",
2196			(features & NETIF_F_RXFCS) ? "ON" : "OFF");
2197		reset = true;
2198	}
2199
2200	if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_RXALL)) {
2201		u8 ignore_fcs_value = (features & NETIF_F_RXALL) ? 1 : 0;
2202
2203		en_info(priv, "Turn %s RX-ALL\n",
2204			ignore_fcs_value ? "ON" : "OFF");
2205		ret = mlx4_SET_PORT_fcs_check(priv->mdev->dev,
2206					      priv->port, ignore_fcs_value);
2207		if (ret)
2208			return ret;
2209	}
2210
2211	if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_HW_VLAN_CTAG_RX)) {
2212		en_info(priv, "Turn %s RX vlan strip offload\n",
2213			(features & NETIF_F_HW_VLAN_CTAG_RX) ? "ON" : "OFF");
2214		reset = true;
2215	}
2216
2217	if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_HW_VLAN_CTAG_TX))
2218		en_info(priv, "Turn %s TX vlan strip offload\n",
2219			(features & NETIF_F_HW_VLAN_CTAG_TX) ? "ON" : "OFF");
2220
2221	if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_LOOPBACK)) {
2222		en_info(priv, "Turn %s loopback\n",
2223			(features & NETIF_F_LOOPBACK) ? "ON" : "OFF");
2224		mlx4_en_update_loopback_state(netdev, features);
2225	}
2226
2227	if (reset) {
2228		ret = mlx4_en_reset_config(netdev, priv->hwtstamp_config,
2229					   features);
2230		if (ret)
2231			return ret;
2232	}
2233
2234	return 0;
2235}
2236
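/* The ndo_set_vf_* / ndo_get_vf_config callbacks below are thin wrappers
 * around the corresponding mlx4 core helpers; they are used only by the
 * master netdev ops.
 */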
2237static int mlx4_en_set_vf_mac(struct net_device *dev, int queue, u8 *mac)
2238{
2239	struct mlx4_en_priv *en_priv = netdev_priv(dev);
2240	struct mlx4_en_dev *mdev = en_priv->mdev;
2241	u64 mac_u64 = mlx4_mac_to_u64(mac);
2242
2243	if (!is_valid_ether_addr(mac))
2244		return -EINVAL;
2245
2246	return mlx4_set_vf_mac(mdev->dev, en_priv->port, queue, mac_u64);
2247}
2248
2249static int mlx4_en_set_vf_vlan(struct net_device *dev, int vf, u16 vlan, u8 qos)
2250{
2251	struct mlx4_en_priv *en_priv = netdev_priv(dev);
2252	struct mlx4_en_dev *mdev = en_priv->mdev;
2253
2254	return mlx4_set_vf_vlan(mdev->dev, en_priv->port, vf, vlan, qos);
2255}
2256
2257static int mlx4_en_set_vf_rate(struct net_device *dev, int vf, int min_tx_rate,
2258			       int max_tx_rate)
2259{
2260	struct mlx4_en_priv *en_priv = netdev_priv(dev);
2261	struct mlx4_en_dev *mdev = en_priv->mdev;
2262
2263	return mlx4_set_vf_rate(mdev->dev, en_priv->port, vf, min_tx_rate,
2264				max_tx_rate);
2265}
2266
2267static int mlx4_en_set_vf_spoofchk(struct net_device *dev, int vf, bool setting)
2268{
2269	struct mlx4_en_priv *en_priv = netdev_priv(dev);
2270	struct mlx4_en_dev *mdev = en_priv->mdev;
2271
2272	return mlx4_set_vf_spoofchk(mdev->dev, en_priv->port, vf, setting);
2273}
2274
2275static int mlx4_en_get_vf_config(struct net_device *dev, int vf, struct ifla_vf_info *ivf)
2276{
2277	struct mlx4_en_priv *en_priv = netdev_priv(dev);
2278	struct mlx4_en_dev *mdev = en_priv->mdev;
2279
2280	return mlx4_get_vf_config(mdev->dev, en_priv->port, vf, ivf);
2281}
2282
2283static int mlx4_en_set_vf_link_state(struct net_device *dev, int vf, int link_state)
2284{
2285	struct mlx4_en_priv *en_priv = netdev_priv(dev);
2286	struct mlx4_en_dev *mdev = en_priv->mdev;
2287
2288	return mlx4_set_vf_link_state(mdev->dev, en_priv->port, vf, link_state);
2289}
2290
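/* ndo_get_phys_port_id: report the 64-bit physical port ID from the
 * device capabilities, most significant byte first.
 */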
2291#define PORT_ID_BYTE_LEN 8
2292static int mlx4_en_get_phys_port_id(struct net_device *dev,
2293				    struct netdev_phys_item_id *ppid)
2294{
2295	struct mlx4_en_priv *priv = netdev_priv(dev);
2296	struct mlx4_dev *mdev = priv->mdev->dev;
2297	int i;
2298	u64 phys_port_id = mdev->caps.phys_port_id[priv->port];
2299
2300	if (!phys_port_id)
2301		return -EOPNOTSUPP;
2302
2303	ppid->id_len = sizeof(phys_port_id);
2304	for (i = PORT_ID_BYTE_LEN - 1; i >= 0; --i) {
2305		ppid->id[i] =  phys_port_id & 0xff;
2306		phys_port_id >>= 8;
2307	}
2308	return 0;
2309}
2310
2311#ifdef CONFIG_MLX4_EN_VXLAN
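/* Work handler: program the VXLAN UDP port in HW, enable steering by
 * outer MAC for tunneled traffic and expose the tunnel offload features.
 */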
2312static void mlx4_en_add_vxlan_offloads(struct work_struct *work)
2313{
2314	int ret;
2315	struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
2316						 vxlan_add_task);
2317
2318	ret = mlx4_config_vxlan_port(priv->mdev->dev, priv->vxlan_port);
2319	if (ret)
2320		goto out;
2321
2322	ret = mlx4_SET_PORT_VXLAN(priv->mdev->dev, priv->port,
2323				  VXLAN_STEER_BY_OUTER_MAC, 1);
2324out:
2325	if (ret) {
2326		en_err(priv, "failed setting L2 tunnel configuration ret %d\n", ret);
2327		return;
2328	}
2329
2330	/* set offloads */
2331	priv->dev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
2332				      NETIF_F_TSO | NETIF_F_GSO_UDP_TUNNEL;
2333}
2334
2335static void mlx4_en_del_vxlan_offloads(struct work_struct *work)
2336{
2337	int ret;
2338	struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
2339						 vxlan_del_task);
2340	/* unset offloads */
2341	priv->dev->hw_enc_features &= ~(NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
2342				      NETIF_F_TSO | NETIF_F_GSO_UDP_TUNNEL);
2343
2344	ret = mlx4_SET_PORT_VXLAN(priv->mdev->dev, priv->port,
2345				  VXLAN_STEER_BY_OUTER_MAC, 0);
2346	if (ret)
2347		en_err(priv, "failed setting L2 tunnel configuration ret %d\n", ret);
2348
2349	priv->vxlan_port = 0;
2350}
2351
2352static void mlx4_en_add_vxlan_port(struct  net_device *dev,
2353				   sa_family_t sa_family, __be16 port)
2354{
2355	struct mlx4_en_priv *priv = netdev_priv(dev);
2356	__be16 current_port;
2357
2358	if (priv->mdev->dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN)
2359		return;
2360
2361	if (sa_family == AF_INET6)
2362		return;
2363
2364	current_port = priv->vxlan_port;
2365	if (current_port && current_port != port) {
2366		en_warn(priv, "vxlan port %d configured, can't add port %d\n",
2367			ntohs(current_port), ntohs(port));
2368		return;
2369	}
2370
2371	priv->vxlan_port = port;
2372	queue_work(priv->mdev->workqueue, &priv->vxlan_add_task);
2373}
2374
2375static void mlx4_en_del_vxlan_port(struct  net_device *dev,
2376				   sa_family_t sa_family, __be16 port)
2377{
2378	struct mlx4_en_priv *priv = netdev_priv(dev);
2379	__be16 current_port;
2380
2381	if (priv->mdev->dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN)
2382		return;
2383
2384	if (sa_family == AF_INET6)
2385		return;
2386
2387	current_port = priv->vxlan_port;
2388	if (current_port != port) {
2389		en_dbg(DRV, priv, "vxlan port %d isn't configured, ignoring\n", ntohs(port));
2390		return;
2391	}
2392
2393	queue_work(priv->mdev->workqueue, &priv->vxlan_del_task);
2394}
2395
2396static netdev_features_t mlx4_en_features_check(struct sk_buff *skb,
2397						struct net_device *dev,
2398						netdev_features_t features)
2399{
2400	features = vlan_features_check(skb, features);
2401	return vxlan_features_check(skb, features);
2402}
2403#endif
2404
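/* ndo_set_tx_maxrate: program a rate limit on the queue's TX QP. The
 * rate is given in Mb/s; values that do not fit in 12 bits are expressed
 * in Gb/s instead, and zero removes the limit.
 */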
2405static int mlx4_en_set_tx_maxrate(struct net_device *dev, int queue_index, u32 maxrate)
2406{
2407	struct mlx4_en_priv *priv = netdev_priv(dev);
2408	struct mlx4_en_tx_ring *tx_ring = priv->tx_ring[queue_index];
2409	struct mlx4_update_qp_params params;
2410	int err;
2411
2412	if (!(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_QP_RATE_LIMIT))
2413		return -EOPNOTSUPP;
2414
2415	/* rate provided to us in Mb/s; check if it fits into 12 bits, if not use Gb/s */
2416	if (maxrate >> 12) {
2417		params.rate_unit = MLX4_QP_RATE_LIMIT_GBS;
2418		params.rate_val  = maxrate / 1000;
2419	} else if (maxrate) {
2420		params.rate_unit = MLX4_QP_RATE_LIMIT_MBS;
2421		params.rate_val  = maxrate;
2422	} else { /* zero serves to revoke the QP rate-limitation */
2423		params.rate_unit = 0;
2424		params.rate_val  = 0;
2425	}
2426
2427	err = mlx4_update_qp(priv->mdev->dev, tx_ring->qpn, MLX4_UPDATE_QP_RATE_LIMIT,
2428			     &params);
2429	return err;
2430}
2431
2432static const struct net_device_ops mlx4_netdev_ops = {
2433	.ndo_open		= mlx4_en_open,
2434	.ndo_stop		= mlx4_en_close,
2435	.ndo_start_xmit		= mlx4_en_xmit,
2436	.ndo_select_queue	= mlx4_en_select_queue,
2437	.ndo_get_stats		= mlx4_en_get_stats,
2438	.ndo_set_rx_mode	= mlx4_en_set_rx_mode,
2439	.ndo_set_mac_address	= mlx4_en_set_mac,
2440	.ndo_validate_addr	= eth_validate_addr,
2441	.ndo_change_mtu		= mlx4_en_change_mtu,
2442	.ndo_do_ioctl		= mlx4_en_ioctl,
2443	.ndo_tx_timeout		= mlx4_en_tx_timeout,
2444	.ndo_vlan_rx_add_vid	= mlx4_en_vlan_rx_add_vid,
2445	.ndo_vlan_rx_kill_vid	= mlx4_en_vlan_rx_kill_vid,
2446#ifdef CONFIG_NET_POLL_CONTROLLER
2447	.ndo_poll_controller	= mlx4_en_netpoll,
2448#endif
2449	.ndo_set_features	= mlx4_en_set_features,
2450	.ndo_setup_tc		= mlx4_en_setup_tc,
2451#ifdef CONFIG_RFS_ACCEL
2452	.ndo_rx_flow_steer	= mlx4_en_filter_rfs,
2453#endif
2454#ifdef CONFIG_NET_RX_BUSY_POLL
2455	.ndo_busy_poll		= mlx4_en_low_latency_recv,
2456#endif
2457	.ndo_get_phys_port_id	= mlx4_en_get_phys_port_id,
2458#ifdef CONFIG_MLX4_EN_VXLAN
2459	.ndo_add_vxlan_port	= mlx4_en_add_vxlan_port,
2460	.ndo_del_vxlan_port	= mlx4_en_del_vxlan_port,
2461	.ndo_features_check	= mlx4_en_features_check,
2462#endif
2463	.ndo_set_tx_maxrate	= mlx4_en_set_tx_maxrate,
2464};
2465
2466static const struct net_device_ops mlx4_netdev_ops_master = {
2467	.ndo_open		= mlx4_en_open,
2468	.ndo_stop		= mlx4_en_close,
2469	.ndo_start_xmit		= mlx4_en_xmit,
2470	.ndo_select_queue	= mlx4_en_select_queue,
2471	.ndo_get_stats		= mlx4_en_get_stats,
2472	.ndo_set_rx_mode	= mlx4_en_set_rx_mode,
2473	.ndo_set_mac_address	= mlx4_en_set_mac,
2474	.ndo_validate_addr	= eth_validate_addr,
2475	.ndo_change_mtu		= mlx4_en_change_mtu,
2476	.ndo_tx_timeout		= mlx4_en_tx_timeout,
2477	.ndo_vlan_rx_add_vid	= mlx4_en_vlan_rx_add_vid,
2478	.ndo_vlan_rx_kill_vid	= mlx4_en_vlan_rx_kill_vid,
2479	.ndo_set_vf_mac		= mlx4_en_set_vf_mac,
2480	.ndo_set_vf_vlan	= mlx4_en_set_vf_vlan,
2481	.ndo_set_vf_rate	= mlx4_en_set_vf_rate,
2482	.ndo_set_vf_spoofchk	= mlx4_en_set_vf_spoofchk,
2483	.ndo_set_vf_link_state	= mlx4_en_set_vf_link_state,
2484	.ndo_get_vf_config	= mlx4_en_get_vf_config,
2485#ifdef CONFIG_NET_POLL_CONTROLLER
2486	.ndo_poll_controller	= mlx4_en_netpoll,
2487#endif
2488	.ndo_set_features	= mlx4_en_set_features,
2489	.ndo_setup_tc		= mlx4_en_setup_tc,
2490#ifdef CONFIG_RFS_ACCEL
2491	.ndo_rx_flow_steer	= mlx4_en_filter_rfs,
2492#endif
2493	.ndo_get_phys_port_id	= mlx4_en_get_phys_port_id,
2494#ifdef CONFIG_MLX4_EN_VXLAN
2495	.ndo_add_vxlan_port	= mlx4_en_add_vxlan_port,
2496	.ndo_del_vxlan_port	= mlx4_en_del_vxlan_port,
2497	.ndo_features_check	= mlx4_en_features_check,
2498#endif
2499	.ndo_set_tx_maxrate	= mlx4_en_set_tx_maxrate,
2500};
2501
2502struct mlx4_en_bond {
2503	struct work_struct work;
2504	struct mlx4_en_priv *priv;
2505	int is_bonded;
2506	struct mlx4_port_map port_map;
2507};
2508
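/* Work handler for bonding events: bond or unbond the two ports of the
 * mlx4 device and program the virtual-to-physical port map.
 */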
2509static void mlx4_en_bond_work(struct work_struct *work)
2510{
2511	struct mlx4_en_bond *bond = container_of(work,
2512						     struct mlx4_en_bond,
2513						     work);
2514	int err = 0;
2515	struct mlx4_dev *dev = bond->priv->mdev->dev;
2516
2517	if (bond->is_bonded) {
2518		if (!mlx4_is_bonded(dev)) {
2519			err = mlx4_bond(dev);
2520			if (err)
2521				en_err(bond->priv, "Failed to bond device\n");
2522		}
2523		if (!err) {
2524			err = mlx4_port_map_set(dev, &bond->port_map);
2525			if (err)
2526				en_err(bond->priv, "Failed to set port map [%d][%d]: %d\n",
2527				       bond->port_map.port1,
2528				       bond->port_map.port2,
2529				       err);
2530		}
2531	} else if (mlx4_is_bonded(dev)) {
2532		err = mlx4_unbond(dev);
2533		if (err)
2534			en_err(bond->priv, "Failed to unbond device\n");
2535	}
2536	dev_put(bond->priv->dev);
2537	kfree(bond);
2538}
2539
2540static int mlx4_en_queue_bond_work(struct mlx4_en_priv *priv, int is_bonded,
2541				   u8 v2p_p1, u8 v2p_p2)
2542{
2543	struct mlx4_en_bond *bond = NULL;
2544
2545	bond = kzalloc(sizeof(*bond), GFP_ATOMIC);
2546	if (!bond)
2547		return -ENOMEM;
2548
2549	INIT_WORK(&bond->work, mlx4_en_bond_work);
2550	bond->priv = priv;
2551	bond->is_bonded = is_bonded;
2552	bond->port_map.port1 = v2p_p1;
2553	bond->port_map.port2 = v2p_p2;
2554	dev_hold(priv->dev);
2555	queue_work(priv->mdev->workqueue, &bond->work);
2556	return 0;
2557}
2558
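/* Netdev notifier: detect when the two Ethernet ports of one mlx4 device
 * become slaves of the same bonding master and queue work to (un)bond
 * the device and update the port map accordingly.
 */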
2559int mlx4_en_netdev_event(struct notifier_block *this,
2560			 unsigned long event, void *ptr)
2561{
2562	struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
2563	u8 port = 0;
2564	struct mlx4_en_dev *mdev;
2565	struct mlx4_dev *dev;
2566	int i, num_eth_ports = 0;
2567	bool do_bond = true;
2568	struct mlx4_en_priv *priv;
2569	u8 v2p_port1 = 0;
2570	u8 v2p_port2 = 0;
2571
2572	if (!net_eq(dev_net(ndev), &init_net))
2573		return NOTIFY_DONE;
2574
2575	mdev = container_of(this, struct mlx4_en_dev, nb);
2576	dev = mdev->dev;
2577
2578	/* Go into bonded mode only when the two network devices on the two
2579	 * ports of the same mlx4 device are slaves of the same bonding master
2580	 */
2581	mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) {
2582		++num_eth_ports;
2583		if (!port && (mdev->pndev[i] == ndev))
2584			port = i;
2585		mdev->upper[i] = mdev->pndev[i] ?
2586			netdev_master_upper_dev_get(mdev->pndev[i]) : NULL;
2587		/* condition not met: network device is a slave */
2588		if (!mdev->upper[i])
2589			do_bond = false;
2590		if (num_eth_ports < 2)
2591			continue;
2592		/* condition not met: same master */
2593		if (mdev->upper[i] != mdev->upper[i-1])
2594			do_bond = false;
2595	}
2596	/* condition not met: exactly 2 slaves */
2597	do_bond = (num_eth_ports == 2) ? do_bond : false;
2598
2599	/* handle only events that come with enough info */
2600	if ((do_bond && (event != NETDEV_BONDING_INFO)) || !port)
2601		return NOTIFY_DONE;
2602
2603	priv = netdev_priv(ndev);
2604	if (do_bond) {
2605		struct netdev_notifier_bonding_info *notifier_info = ptr;
2606		struct netdev_bonding_info *bonding_info =
2607			&notifier_info->bonding_info;
2608
2609		/* required mode 1, 2 or 4 */
2610		if ((bonding_info->master.bond_mode != BOND_MODE_ACTIVEBACKUP) &&
2611		    (bonding_info->master.bond_mode != BOND_MODE_XOR) &&
2612		    (bonding_info->master.bond_mode != BOND_MODE_8023AD))
2613			do_bond = false;
2614
2615		/* require exactly 2 slaves */
2616		if (bonding_info->master.num_slaves != 2)
2617			do_bond = false;
2618
2619		/* calc v2p */
2620		if (do_bond) {
2621			if (bonding_info->master.bond_mode ==
2622			    BOND_MODE_ACTIVEBACKUP) {
2623				/* in active-backup mode virtual ports are
2624				 * mapped to the physical port of the active
2625				 * slave */
2626				if (bonding_info->slave.state ==
2627				    BOND_STATE_BACKUP) {
2628					if (port == 1) {
2629						v2p_port1 = 2;
2630						v2p_port2 = 2;
2631					} else {
2632						v2p_port1 = 1;
2633						v2p_port2 = 1;
2634					}
2635				} else { /* BOND_STATE_ACTIVE */
2636					if (port == 1) {
2637						v2p_port1 = 1;
2638						v2p_port2 = 1;
2639					} else {
2640						v2p_port1 = 2;
2641						v2p_port2 = 2;
2642					}
2643				}
2644			} else { /* Active-Active */
2645				/* in active-active mode a virtual port is
2646				 * mapped to the native physical port if and only
2647				 * if the physical port is up */
2648				__s8 link = bonding_info->slave.link;
2649
2650				if (port == 1)
2651					v2p_port2 = 2;
2652				else
2653					v2p_port1 = 1;
2654				if ((link == BOND_LINK_UP) ||
2655				    (link == BOND_LINK_FAIL)) {
2656					if (port == 1)
2657						v2p_port1 = 1;
2658					else
2659						v2p_port2 = 2;
2660				} else { /* BOND_LINK_DOWN || BOND_LINK_BACK */
2661					if (port == 1)
2662						v2p_port1 = 2;
2663					else
2664						v2p_port2 = 1;
2665				}
2666			}
2667		}
2668	}
2669
2670	mlx4_en_queue_bond_work(priv, do_bond,
2671				v2p_port1, v2p_port2);
2672
2673	return NOTIFY_DONE;
2674}
2675
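/* Enable the flow-control statistics matching the current pause/PFC
 * configuration in the ethtool statistics bitmap.
 */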
2676void mlx4_en_update_pfc_stats_bitmap(struct mlx4_dev *dev,
2677				     struct mlx4_en_stats_bitmap *stats_bitmap,
2678				     u8 rx_ppp, u8 rx_pause,
2679				     u8 tx_ppp, u8 tx_pause)
2680{
2681	int last_i = NUM_MAIN_STATS + NUM_PORT_STATS;
2682
2683	if (!mlx4_is_slave(dev) &&
2684	    (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FLOWSTATS_EN)) {
2685		mutex_lock(&stats_bitmap->mutex);
2686		bitmap_clear(stats_bitmap->bitmap, last_i, NUM_FLOW_STATS);
2687
2688		if (rx_ppp)
2689			bitmap_set(stats_bitmap->bitmap, last_i,
2690				   NUM_FLOW_PRIORITY_STATS_RX);
2691		last_i += NUM_FLOW_PRIORITY_STATS_RX;
2692
2693		if (rx_pause && !(rx_ppp))
2694			bitmap_set(stats_bitmap->bitmap, last_i,
2695				   NUM_FLOW_STATS_RX);
2696		last_i += NUM_FLOW_STATS_RX;
2697
2698		if (tx_ppp)
2699			bitmap_set(stats_bitmap->bitmap, last_i,
2700				   NUM_FLOW_PRIORITY_STATS_TX);
2701		last_i += NUM_FLOW_PRIORITY_STATS_TX;
2702
2703		if (tx_pause && !(tx_ppp))
2704			bitmap_set(stats_bitmap->bitmap, last_i,
2705				   NUM_FLOW_STATS_TX);
2706		last_i += NUM_FLOW_STATS_TX;
2707
2708		mutex_unlock(&stats_bitmap->mutex);
2709	}
2710}
2711
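/* Build the initial ethtool statistics bitmap: slaves expose only a
 * subset of the netdev counters plus the port statistics, while
 * non-slaves also get the flow-control and packet statistics.
 */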
2712void mlx4_en_set_stats_bitmap(struct mlx4_dev *dev,
2713			      struct mlx4_en_stats_bitmap *stats_bitmap,
2714			      u8 rx_ppp, u8 rx_pause,
2715			      u8 tx_ppp, u8 tx_pause)
2716{
2717	int last_i = 0;
2718
2719	mutex_init(&stats_bitmap->mutex);
2720	bitmap_zero(stats_bitmap->bitmap, NUM_ALL_STATS);
2721
2722	if (mlx4_is_slave(dev)) {
2723		bitmap_set(stats_bitmap->bitmap, last_i +
2724					 MLX4_FIND_NETDEV_STAT(rx_packets), 1);
2725		bitmap_set(stats_bitmap->bitmap, last_i +
2726					 MLX4_FIND_NETDEV_STAT(tx_packets), 1);
2727		bitmap_set(stats_bitmap->bitmap, last_i +
2728					 MLX4_FIND_NETDEV_STAT(rx_bytes), 1);
2729		bitmap_set(stats_bitmap->bitmap, last_i +
2730					 MLX4_FIND_NETDEV_STAT(tx_bytes), 1);
2731		bitmap_set(stats_bitmap->bitmap, last_i +
2732					 MLX4_FIND_NETDEV_STAT(rx_dropped), 1);
2733		bitmap_set(stats_bitmap->bitmap, last_i +
2734					 MLX4_FIND_NETDEV_STAT(tx_dropped), 1);
2735	} else {
2736		bitmap_set(stats_bitmap->bitmap, last_i, NUM_MAIN_STATS);
2737	}
2738	last_i += NUM_MAIN_STATS;
2739
2740	bitmap_set(stats_bitmap->bitmap, last_i, NUM_PORT_STATS);
2741	last_i += NUM_PORT_STATS;
2742
2743	mlx4_en_update_pfc_stats_bitmap(dev, stats_bitmap,
2744					rx_ppp, rx_pause,
2745					tx_ppp, tx_pause);
2746	last_i += NUM_FLOW_STATS;
2747
2748	if (!mlx4_is_slave(dev))
2749		bitmap_set(stats_bitmap->bitmap, last_i, NUM_PKT_STATS);
2750}
2751
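/* Create and register the netdev for one port: allocate private data,
 * rings and CQs, set up netdev ops and features, configure the port and
 * queue the periodic statistics work.
 */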
2752int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
2753			struct mlx4_en_port_profile *prof)
2754{
2755	struct net_device *dev;
2756	struct mlx4_en_priv *priv;
2757	int i;
2758	int err;
2759	u64 mac_u64;
2760
2761	dev = alloc_etherdev_mqs(sizeof(struct mlx4_en_priv),
2762				 MAX_TX_RINGS, MAX_RX_RINGS);
2763	if (dev == NULL)
2764		return -ENOMEM;
2765
2766	netif_set_real_num_tx_queues(dev, prof->tx_ring_num);
2767	netif_set_real_num_rx_queues(dev, prof->rx_ring_num);
2768
2769	SET_NETDEV_DEV(dev, &mdev->dev->persist->pdev->dev);
2770	dev->dev_port = port - 1;
2771
2772	/*
2773	 * Initialize driver private data
2774	 */
2775
2776	priv = netdev_priv(dev);
2777	memset(priv, 0, sizeof(struct mlx4_en_priv));
2778	spin_lock_init(&priv->stats_lock);
2779	INIT_WORK(&priv->rx_mode_task, mlx4_en_do_set_rx_mode);
2780	INIT_WORK(&priv->watchdog_task, mlx4_en_restart);
2781	INIT_WORK(&priv->linkstate_task, mlx4_en_linkstate);
2782	INIT_DELAYED_WORK(&priv->stats_task, mlx4_en_do_get_stats);
2783	INIT_DELAYED_WORK(&priv->service_task, mlx4_en_service_task);
2784#ifdef CONFIG_MLX4_EN_VXLAN
2785	INIT_WORK(&priv->vxlan_add_task, mlx4_en_add_vxlan_offloads);
2786	INIT_WORK(&priv->vxlan_del_task, mlx4_en_del_vxlan_offloads);
2787#endif
2788#ifdef CONFIG_RFS_ACCEL
2789	INIT_LIST_HEAD(&priv->filters);
2790	spin_lock_init(&priv->filters_lock);
2791#endif
2792
2793	priv->dev = dev;
2794	priv->mdev = mdev;
2795	priv->ddev = &mdev->pdev->dev;
2796	priv->prof = prof;
2797	priv->port = port;
2798	priv->port_up = false;
2799	priv->flags = prof->flags;
2800	priv->pflags = MLX4_EN_PRIV_FLAGS_BLUEFLAME;
2801	priv->ctrl_flags = cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE |
2802			MLX4_WQE_CTRL_SOLICITED);
2803	priv->num_tx_rings_p_up = mdev->profile.num_tx_rings_p_up;
2804	priv->tx_ring_num = prof->tx_ring_num;
2805	priv->tx_work_limit = MLX4_EN_DEFAULT_TX_WORK;
2806	netdev_rss_key_fill(priv->rss_key, sizeof(priv->rss_key));
2807
2808	priv->tx_ring = kzalloc(sizeof(struct mlx4_en_tx_ring *) * MAX_TX_RINGS,
2809				GFP_KERNEL);
2810	if (!priv->tx_ring) {
2811		err = -ENOMEM;
2812		goto out;
2813	}
2814	priv->tx_cq = kzalloc(sizeof(struct mlx4_en_cq *) * MAX_TX_RINGS,
2815			      GFP_KERNEL);
2816	if (!priv->tx_cq) {
2817		err = -ENOMEM;
2818		goto out;
2819	}
2820	priv->rx_ring_num = prof->rx_ring_num;
2821	priv->cqe_factor = (mdev->dev->caps.cqe_size == 64) ? 1 : 0;
2822	priv->cqe_size = mdev->dev->caps.cqe_size;
2823	priv->mac_index = -1;
2824	priv->msg_enable = MLX4_EN_MSG_LEVEL;
2825#ifdef CONFIG_MLX4_EN_DCB
2826	if (!mlx4_is_slave(priv->mdev->dev)) {
2827		if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ETS_CFG) {
2828			dev->dcbnl_ops = &mlx4_en_dcbnl_ops;
2829		} else {
2830			en_info(priv, "enabling only PFC DCB ops\n");
2831			dev->dcbnl_ops = &mlx4_en_dcbnl_pfc_ops;
2832		}
2833	}
2834#endif
2835
2836	for (i = 0; i < MLX4_EN_MAC_HASH_SIZE; ++i)
2837		INIT_HLIST_HEAD(&priv->mac_hash[i]);
2838
2839	/* Query for default MAC and max MTU */
2840	priv->max_mtu = mdev->dev->caps.eth_mtu_cap[priv->port];
2841
2842	if (mdev->dev->caps.rx_checksum_flags_port[priv->port] &
2843	    MLX4_RX_CSUM_MODE_VAL_NON_TCP_UDP)
2844		priv->flags |= MLX4_EN_FLAG_RX_CSUM_NON_TCP_UDP;
2845
2846	/* Set default MAC */
2847	dev->addr_len = ETH_ALEN;
2848	mlx4_en_u64_to_mac(dev->dev_addr, mdev->dev->caps.def_mac[priv->port]);
2849	if (!is_valid_ether_addr(dev->dev_addr)) {
2850		if (mlx4_is_slave(priv->mdev->dev)) {
2851			eth_hw_addr_random(dev);
2852			en_warn(priv, "Assigned random MAC address %pM\n", dev->dev_addr);
2853			mac_u64 = mlx4_mac_to_u64(dev->dev_addr);
2854			mdev->dev->caps.def_mac[priv->port] = mac_u64;
2855		} else {
2856			en_err(priv, "Port: %d, invalid MAC burned: %pM, quitting\n",
2857			       priv->port, dev->dev_addr);
2858			err = -EINVAL;
2859			goto out;
2860		}
2861	}
2862
2863	memcpy(priv->current_mac, dev->dev_addr, sizeof(priv->current_mac));
2864
2865	priv->stride = roundup_pow_of_two(sizeof(struct mlx4_en_rx_desc) +
2866					  DS_SIZE * MLX4_EN_MAX_RX_FRAGS);
2867	err = mlx4_en_alloc_resources(priv);
2868	if (err)
2869		goto out;
2870
2871	/* Initialize time stamping config */
2872	priv->hwtstamp_config.flags = 0;
2873	priv->hwtstamp_config.tx_type = HWTSTAMP_TX_OFF;
2874	priv->hwtstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;
2875
2876	/* Allocate page for receive rings */
2877	err = mlx4_alloc_hwq_res(mdev->dev, &priv->res,
2878				MLX4_EN_PAGE_SIZE, MLX4_EN_PAGE_SIZE);
2879	if (err) {
2880		en_err(priv, "Failed to allocate page for rx qps\n");
2881		goto out;
2882	}
2883	priv->allocated = 1;
2884
2885	/*
2886	 * Initialize netdev entry points
2887	 */
2888	if (mlx4_is_master(priv->mdev->dev))
2889		dev->netdev_ops = &mlx4_netdev_ops_master;
2890	else
2891		dev->netdev_ops = &mlx4_netdev_ops;
2892	dev->watchdog_timeo = MLX4_EN_WATCHDOG_TIMEOUT;
2893	netif_set_real_num_tx_queues(dev, priv->tx_ring_num);
2894	netif_set_real_num_rx_queues(dev, priv->rx_ring_num);
2895
2896	dev->ethtool_ops = &mlx4_en_ethtool_ops;
2897
2898	/*
2899	 * Set driver features
2900	 */
2901	dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
2902	if (mdev->LSO_support)
2903		dev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
2904
2905	dev->vlan_features = dev->hw_features;
2906
2907	dev->hw_features |= NETIF_F_RXCSUM | NETIF_F_RXHASH;
2908	dev->features = dev->hw_features | NETIF_F_HIGHDMA |
2909			NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
2910			NETIF_F_HW_VLAN_CTAG_FILTER;
2911	dev->hw_features |= NETIF_F_LOOPBACK |
2912			NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
2913
2914	if (mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_FCS_KEEP)
2915		dev->hw_features |= NETIF_F_RXFCS;
2916
2917	if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_IGNORE_FCS)
2918		dev->hw_features |= NETIF_F_RXALL;
2919
2920	if (mdev->dev->caps.steering_mode ==
2921	    MLX4_STEERING_MODE_DEVICE_MANAGED &&
2922	    mdev->dev->caps.dmfs_high_steer_mode != MLX4_STEERING_DMFS_A0_STATIC)
2923		dev->hw_features |= NETIF_F_NTUPLE;
2924
2925	if (mdev->dev->caps.steering_mode != MLX4_STEERING_MODE_A0)
2926		dev->priv_flags |= IFF_UNICAST_FLT;
2927
2928	/* Setting a default hash function value */
2929	if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS_TOP) {
2930		priv->rss_hash_fn = ETH_RSS_HASH_TOP;
2931	} else if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS_XOR) {
2932		priv->rss_hash_fn = ETH_RSS_HASH_XOR;
2933	} else {
2934		en_warn(priv,
2935			"No RSS hash capabilities exposed, using Toeplitz\n");
2936		priv->rss_hash_fn = ETH_RSS_HASH_TOP;
2937	}
2938
2939	if (mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) {
2940		dev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
2941		dev->features    |= NETIF_F_GSO_UDP_TUNNEL;
2942	}
2943
2944	mdev->pndev[port] = dev;
2945	mdev->upper[port] = NULL;
2946
2947	netif_carrier_off(dev);
2948	mlx4_en_set_default_moderation(priv);
2949
2950	en_warn(priv, "Using %d TX rings\n", prof->tx_ring_num);
2951	en_warn(priv, "Using %d RX rings\n", prof->rx_ring_num);
2952
2953	mlx4_en_update_loopback_state(priv->dev, priv->dev->features);
2954
2955	/* Configure port */
2956	mlx4_en_calc_rx_buf(dev);
2957	err = mlx4_SET_PORT_general(mdev->dev, priv->port,
2958				    priv->rx_skb_size + ETH_FCS_LEN,
2959				    prof->tx_pause, prof->tx_ppp,
2960				    prof->rx_pause, prof->rx_ppp);
2961	if (err) {
2962		en_err(priv, "Failed setting port general configurations for port %d, with error %d\n",
2963		       priv->port, err);
2964		goto out;
2965	}
2966
2967	if (mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) {
2968		err = mlx4_SET_PORT_VXLAN(mdev->dev, priv->port, VXLAN_STEER_BY_OUTER_MAC, 1);
2969		if (err) {
2970			en_err(priv, "Failed setting port L2 tunnel configuration, err %d\n",
2971			       err);
2972			goto out;
2973		}
2974	}
2975
2976	/* Init port */
2977	en_warn(priv, "Initializing port\n");
2978	err = mlx4_INIT_PORT(mdev->dev, priv->port);
2979	if (err) {
2980		en_err(priv, "Failed Initializing port\n");
2981		goto out;
2982	}
2983	queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY);
2984
2985	if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS)
2986		queue_delayed_work(mdev->workqueue, &priv->service_task,
2987				   SERVICE_TASK_DELAY);
2988
2989	mlx4_en_set_stats_bitmap(mdev->dev, &priv->stats_bitmap,
2990				 mdev->profile.prof[priv->port].rx_ppp,
2991				 mdev->profile.prof[priv->port].rx_pause,
2992				 mdev->profile.prof[priv->port].tx_ppp,
2993				 mdev->profile.prof[priv->port].tx_pause);
2994
2995	err = register_netdev(dev);
2996	if (err) {
2997		en_err(priv, "Netdev registration failed for port %d\n", port);
2998		goto out;
2999	}
3000
3001	priv->registered = 1;
3002
3003	return 0;
3004
3005out:
3006	mlx4_en_destroy_netdev(dev);
3007	return err;
3008}
3009
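/* Reconfigure the port for a new timestamping, RX VLAN offload or RX-FCS
 * setting: stop the port if it is up, reallocate its resources with the
 * new configuration and start it again.
 */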
3010int mlx4_en_reset_config(struct net_device *dev,
3011			 struct hwtstamp_config ts_config,
3012			 netdev_features_t features)
3013{
3014	struct mlx4_en_priv *priv = netdev_priv(dev);
3015	struct mlx4_en_dev *mdev = priv->mdev;
3016	int port_up = 0;
3017	int err = 0;
3018
3019	if (priv->hwtstamp_config.tx_type == ts_config.tx_type &&
3020	    priv->hwtstamp_config.rx_filter == ts_config.rx_filter &&
3021	    !DEV_FEATURE_CHANGED(dev, features, NETIF_F_HW_VLAN_CTAG_RX) &&
3022	    !DEV_FEATURE_CHANGED(dev, features, NETIF_F_RXFCS))
3023		return 0; /* Nothing to change */
3024
3025	if (DEV_FEATURE_CHANGED(dev, features, NETIF_F_HW_VLAN_CTAG_RX) &&
3026	    (features & NETIF_F_HW_VLAN_CTAG_RX) &&
3027	    (priv->hwtstamp_config.rx_filter != HWTSTAMP_FILTER_NONE)) {
3028		en_warn(priv, "Can't turn ON rx vlan offload while time-stamping rx filter is ON\n");
3029		return -EINVAL;
3030	}
3031
3032	mutex_lock(&mdev->state_lock);
3033	if (priv->port_up) {
3034		port_up = 1;
3035		mlx4_en_stop_port(dev, 1);
3036	}
3037
3038	mlx4_en_free_resources(priv);
3039
3040	en_warn(priv, "Changing device configuration rx filter(%x) rx vlan(%x)\n",
3041		ts_config.rx_filter, !!(features & NETIF_F_HW_VLAN_CTAG_RX));
3042
3043	priv->hwtstamp_config.tx_type = ts_config.tx_type;
3044	priv->hwtstamp_config.rx_filter = ts_config.rx_filter;
3045
3046	if (DEV_FEATURE_CHANGED(dev, features, NETIF_F_HW_VLAN_CTAG_RX)) {
3047		if (features & NETIF_F_HW_VLAN_CTAG_RX)
3048			dev->features |= NETIF_F_HW_VLAN_CTAG_RX;
3049		else
3050			dev->features &= ~NETIF_F_HW_VLAN_CTAG_RX;
3051	} else if (ts_config.rx_filter == HWTSTAMP_FILTER_NONE) {
3052		/* RX time-stamping is OFF, update the RX vlan offload
3053		 * to the latest wanted state
3054		 */
3055		if (dev->wanted_features & NETIF_F_HW_VLAN_CTAG_RX)
3056			dev->features |= NETIF_F_HW_VLAN_CTAG_RX;
3057		else
3058			dev->features &= ~NETIF_F_HW_VLAN_CTAG_RX;
3059	}
3060
3061	if (DEV_FEATURE_CHANGED(dev, features, NETIF_F_RXFCS)) {
3062		if (features & NETIF_F_RXFCS)
3063			dev->features |= NETIF_F_RXFCS;
3064		else
3065			dev->features &= ~NETIF_F_RXFCS;
3066	}
3067
3068	/* RX vlan offload and RX time-stamping can't co-exist!
3069	 * Regardless of the caller's choice, turn off RX vlan offload
3070	 * when RX time-stamping is ON.
3071	 */
3072	if (ts_config.rx_filter != HWTSTAMP_FILTER_NONE) {
3073		if (dev->features & NETIF_F_HW_VLAN_CTAG_RX)
3074			en_warn(priv, "Turning off RX vlan offload since RX time-stamping is ON\n");
3075		dev->features &= ~NETIF_F_HW_VLAN_CTAG_RX;
3076	}
3077
3078	err = mlx4_en_alloc_resources(priv);
3079	if (err) {
3080		en_err(priv, "Failed reallocating port resources\n");
3081		goto out;
3082	}
3083	if (port_up) {
3084		err = mlx4_en_start_port(dev);
3085		if (err)
3086			en_err(priv, "Failed starting port\n");
3087	}
3088
3089out:
3090	mutex_unlock(&mdev->state_lock);
3091	netdev_features_change(dev);
3092	return err;
3093}
3094