/*******************************************************************************
 *
 * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
 * Copyright(c) 2013 - 2014 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Contact Information:
 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 ******************************************************************************/

#include "i40evf.h"
#include "i40e_prototype.h"

/* busy wait delay in msec */
#define I40EVF_BUSY_WAIT_DELAY 10
#define I40EVF_BUSY_WAIT_COUNT 50
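/* together these bound a busy wait at DELAY * COUNT = ~500 msec */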

/**
 * i40evf_send_pf_msg
 * @adapter: adapter structure
 * @op: virtual channel opcode
 * @msg: pointer to message buffer
 * @len: message length
 *
 * Send message to PF and log an error if the send fails.
 **/
static int i40evf_send_pf_msg(struct i40evf_adapter *adapter,
			      enum i40e_virtchnl_ops op, u8 *msg, u16 len)
{
	struct i40e_hw *hw = &adapter->hw;
	i40e_status err;

	if (adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED)
		return 0; /* nothing to see here, move along */

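	/* Sends are fire-and-forget; any reply arrives later on the ARQ and
	 * is handled by i40evf_virtchnl_completion().
	 */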
	err = i40e_aq_send_msg_to_pf(hw, op, 0, msg, len, NULL);
	if (err)
		dev_err(&adapter->pdev->dev, "Unable to send opcode %d to PF, error %d, aq status %d\n",
			op, err, hw->aq.asq_last_status);
	return err;
}

/**
 * i40evf_send_api_ver
 * @adapter: adapter structure
 *
 * Send API version admin queue message to the PF. The reply is not checked
 * in this function. Returns 0 if the message was successfully sent, or one
 * of the I40E_ADMIN_QUEUE_ERROR_ statuses if not.
 **/
int i40evf_send_api_ver(struct i40evf_adapter *adapter)
{
	struct i40e_virtchnl_version_info vvi;

	vvi.major = I40E_VIRTCHNL_VERSION_MAJOR;
	vvi.minor = I40E_VIRTCHNL_VERSION_MINOR;

	return i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_VERSION, (u8 *)&vvi,
				  sizeof(vvi));
}

/**
 * i40evf_verify_api_ver
 * @adapter: adapter structure
 *
 * Compare API versions with the PF. Must be called after the admin queue is
 * initialized. Returns 0 if the API versions match, -EIO if they do not,
 * -ENOMEM if the event buffer cannot be allocated, and
 * I40E_ERR_ADMIN_QUEUE_NO_WORK if the admin queue is empty; any other
 * errors from the firmware are propagated.
 **/
int i40evf_verify_api_ver(struct i40evf_adapter *adapter)
{
	struct i40e_virtchnl_version_info *pf_vvi;
	struct i40e_hw *hw = &adapter->hw;
	struct i40e_arq_event_info event;
	enum i40e_virtchnl_ops op;
	i40e_status err;

	event.buf_len = I40EVF_MAX_AQ_BUF_SIZE;
	event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
	if (!event.msg_buf) {
		err = -ENOMEM;
		goto out;
	}

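	/* Poll the ARQ, discarding anything that isn't the VERSION reply. */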
	while (1) {
		err = i40evf_clean_arq_element(hw, &event, NULL);
		/* When the AQ is empty, i40evf_clean_arq_element will return
		 * nonzero and this loop will terminate.
		 */
		if (err)
			goto out_alloc;
		op = (enum i40e_virtchnl_ops)le32_to_cpu(event.desc.cookie_high);
		if (op == I40E_VIRTCHNL_OP_VERSION)
			break;
	}

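	/* cookie_low carries the PF's return code for our request */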
	err = (i40e_status)le32_to_cpu(event.desc.cookie_low);
	if (err)
		goto out_alloc;

	if (op != I40E_VIRTCHNL_OP_VERSION) {
		dev_info(&adapter->pdev->dev, "Invalid reply type %d from PF\n",
			 op);
		err = -EIO;
		goto out_alloc;
	}

	pf_vvi = (struct i40e_virtchnl_version_info *)event.msg_buf;
	if ((pf_vvi->major != I40E_VIRTCHNL_VERSION_MAJOR) ||
	    (pf_vvi->minor != I40E_VIRTCHNL_VERSION_MINOR))
		err = -EIO;

out_alloc:
	kfree(event.msg_buf);
out:
	return err;
}

/**
 * i40evf_send_vf_config_msg
 * @adapter: adapter structure
 *
 * Send VF configuration request admin queue message to the PF. The reply
 * is not checked in this function. Returns 0 if the message was
 * successfully sent, or one of the I40E_ADMIN_QUEUE_ERROR_ statuses if not.
 **/
int i40evf_send_vf_config_msg(struct i40evf_adapter *adapter)
{
	return i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
				  NULL, 0);
}

/**
 * i40evf_get_vf_config
 * @adapter: private adapter structure
 *
 * Get VF configuration from PF and populate hw structure. Must be called
 * after the admin queue is initialized. Polls the admin receive queue until
 * the reply arrives, and fails if the queue empties first. The response
 * from the PF is copied into adapter->vf_res for further processing by the
 * caller.
 **/
int i40evf_get_vf_config(struct i40evf_adapter *adapter)
{
	struct i40e_hw *hw = &adapter->hw;
	struct i40e_arq_event_info event;
	enum i40e_virtchnl_ops op;
	i40e_status err;
	u16 len;

	len = sizeof(struct i40e_virtchnl_vf_resource) +
	      I40E_MAX_VF_VSI * sizeof(struct i40e_virtchnl_vsi_resource);
	event.buf_len = len;
	event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
	if (!event.msg_buf) {
		err = -ENOMEM;
		goto out;
	}

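	/* Poll the ARQ, discarding anything that isn't the resources reply. */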
	while (1) {
		/* When the AQ is empty, i40evf_clean_arq_element will return
		 * nonzero and this loop will terminate.
		 */
		err = i40evf_clean_arq_element(hw, &event, NULL);
		if (err)
			goto out_alloc;
		op = (enum i40e_virtchnl_ops)le32_to_cpu(event.desc.cookie_high);
		if (op == I40E_VIRTCHNL_OP_GET_VF_RESOURCES)
			break;
	}

	err = (i40e_status)le32_to_cpu(event.desc.cookie_low);
	memcpy(adapter->vf_res, event.msg_buf, min(event.msg_len, len));

	i40e_vf_parse_hw_config(hw, adapter->vf_res);
out_alloc:
	kfree(event.msg_buf);
out:
	return err;
}

/**
 * i40evf_configure_queues
 * @adapter: adapter structure
 *
 * Request that the PF set up our (previously allocated) queues.
 **/
void i40evf_configure_queues(struct i40evf_adapter *adapter)
{
	struct i40e_virtchnl_vsi_queue_config_info *vqci;
	struct i40e_virtchnl_queue_pair_info *vqpi;
	int pairs = adapter->num_active_queues;
	int i, len;

	if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "%s: command %d pending\n",
			__func__, adapter->current_op);
		return;
	}
	adapter->current_op = I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES;
	len = sizeof(struct i40e_virtchnl_vsi_queue_config_info) +
	      (sizeof(struct i40e_virtchnl_queue_pair_info) * pairs);
	vqci = kzalloc(len, GFP_ATOMIC);
	if (!vqci)
		return;

	vqci->vsi_id = adapter->vsi_res->vsi_id;
	vqci->num_queue_pairs = pairs;
	vqpi = vqci->qpair;
	/* Size check is not needed here - HW max is 16 queue pairs, and we
	 * can fit info for 31 of them into the AQ buffer before it overflows.
	 */
	for (i = 0; i < pairs; i++) {
		vqpi->txq.vsi_id = vqci->vsi_id;
		vqpi->txq.queue_id = i;
		vqpi->txq.ring_len = adapter->tx_rings[i]->count;
		vqpi->txq.dma_ring_addr = adapter->tx_rings[i]->dma;
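		/* Enable Tx head write-back, placed immediately after the
		 * end of the descriptor ring, so completed descriptors can
		 * be detected without a register read.
		 */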
		vqpi->txq.headwb_enabled = 1;
		vqpi->txq.dma_headwb_addr = vqpi->txq.dma_ring_addr +
		    (vqpi->txq.ring_len * sizeof(struct i40e_tx_desc));

		vqpi->rxq.vsi_id = vqci->vsi_id;
		vqpi->rxq.queue_id = i;
		vqpi->rxq.ring_len = adapter->rx_rings[i]->count;
		vqpi->rxq.dma_ring_addr = adapter->rx_rings[i]->dma;
		vqpi->rxq.max_pkt_size = adapter->netdev->mtu +
					 ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN;
		vqpi->rxq.databuffer_size = adapter->rx_rings[i]->rx_buf_len;
		vqpi++;
	}

	adapter->aq_required &= ~I40EVF_FLAG_AQ_CONFIGURE_QUEUES;
	i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
			   (u8 *)vqci, len);
	kfree(vqci);
}

/**
 * i40evf_enable_queues
 * @adapter: adapter structure
 *
 * Request that the PF enable all of our queues.
 **/
void i40evf_enable_queues(struct i40evf_adapter *adapter)
{
	struct i40e_virtchnl_queue_select vqs;

	if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "%s: command %d pending\n",
			__func__, adapter->current_op);
		return;
	}
	adapter->current_op = I40E_VIRTCHNL_OP_ENABLE_QUEUES;
	vqs.vsi_id = adapter->vsi_res->vsi_id;
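	/* queue bitmaps: one bit per active queue pair */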
	vqs.tx_queues = (1 << adapter->num_active_queues) - 1;
	vqs.rx_queues = vqs.tx_queues;
	adapter->aq_required &= ~I40EVF_FLAG_AQ_ENABLE_QUEUES;
	i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_ENABLE_QUEUES,
			   (u8 *)&vqs, sizeof(vqs));
}

/**
 * i40evf_disable_queues
 * @adapter: adapter structure
 *
 * Request that the PF disable all of our queues.
 **/
void i40evf_disable_queues(struct i40evf_adapter *adapter)
{
	struct i40e_virtchnl_queue_select vqs;

	if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "%s: command %d pending\n",
			__func__, adapter->current_op);
		return;
	}
	adapter->current_op = I40E_VIRTCHNL_OP_DISABLE_QUEUES;
	vqs.vsi_id = adapter->vsi_res->vsi_id;
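	/* same queue bitmaps as for enable: one bit per active pair */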
	vqs.tx_queues = (1 << adapter->num_active_queues) - 1;
	vqs.rx_queues = vqs.tx_queues;
	adapter->aq_required &= ~I40EVF_FLAG_AQ_DISABLE_QUEUES;
	i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_DISABLE_QUEUES,
			   (u8 *)&vqs, sizeof(vqs));
}

/**
 * i40evf_map_queues
 * @adapter: adapter structure
 *
 * Request that the PF map queues to interrupt vectors. Misc causes, including
 * admin queue, are always mapped to vector 0.
 **/
void i40evf_map_queues(struct i40evf_adapter *adapter)
{
	struct i40e_virtchnl_irq_map_info *vimi;
	int v_idx, q_vectors, len;
	struct i40e_q_vector *q_vector;

	if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "%s: command %d pending\n",
			__func__, adapter->current_op);
		return;
	}
	adapter->current_op = I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP;

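	/* MSI-X vector 0 is reserved for misc causes (AdminQ), so only the
	 * remaining vectors carry traffic queues.
	 */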
	q_vectors = adapter->num_msix_vectors - NONQ_VECS;

	len = sizeof(struct i40e_virtchnl_irq_map_info) +
	      (adapter->num_msix_vectors *
	       sizeof(struct i40e_virtchnl_vector_map));
	vimi = kzalloc(len, GFP_ATOMIC);
	if (!vimi)
		return;

	vimi->num_vectors = adapter->num_msix_vectors;
	/* Queue vectors first */
	for (v_idx = 0; v_idx < q_vectors; v_idx++) {
		q_vector = adapter->q_vector[v_idx];
		vimi->vecmap[v_idx].vsi_id = adapter->vsi_res->vsi_id;
		vimi->vecmap[v_idx].vector_id = v_idx + NONQ_VECS;
		vimi->vecmap[v_idx].txq_map = q_vector->ring_mask;
		vimi->vecmap[v_idx].rxq_map = q_vector->ring_mask;
	}
	/* Misc vector last - this is only for AdminQ messages */
	vimi->vecmap[v_idx].vsi_id = adapter->vsi_res->vsi_id;
	vimi->vecmap[v_idx].vector_id = 0;
	vimi->vecmap[v_idx].txq_map = 0;
	vimi->vecmap[v_idx].rxq_map = 0;

	adapter->aq_required &= ~I40EVF_FLAG_AQ_MAP_VECTORS;
	i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
			   (u8 *)vimi, len);
	kfree(vimi);
}

/**
 * i40evf_add_ether_addrs
 * @adapter: adapter structure
 *
 * Request that the PF add one or more addresses to our filters.
 **/
void i40evf_add_ether_addrs(struct i40evf_adapter *adapter)
{
	struct i40e_virtchnl_ether_addr_list *veal;
	int len, i = 0, count = 0;
	struct i40evf_mac_filter *f;

	if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "%s: command %d pending\n",
			__func__, adapter->current_op);
		return;
	}
	list_for_each_entry(f, &adapter->mac_filter_list, list) {
		if (f->add)
			count++;
	}
	if (!count) {
		adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_MAC_FILTER;
		return;
	}
	adapter->current_op = I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS;

	len = sizeof(struct i40e_virtchnl_ether_addr_list) +
	      (count * sizeof(struct i40e_virtchnl_ether_addr));
	if (len > I40EVF_MAX_AQ_BUF_SIZE) {
		dev_warn(&adapter->pdev->dev, "%s: Too many MAC address changes in one request\n",
			 __func__);
		count = (I40EVF_MAX_AQ_BUF_SIZE -
			 sizeof(struct i40e_virtchnl_ether_addr_list)) /
			sizeof(struct i40e_virtchnl_ether_addr);
		len = I40EVF_MAX_AQ_BUF_SIZE;
	}

	veal = kzalloc(len, GFP_ATOMIC);
	if (!veal)
		return;

	veal->vsi_id = adapter->vsi_res->vsi_id;
	veal->num_elements = count;
	list_for_each_entry(f, &adapter->mac_filter_list, list) {
		if (f->add) {
			ether_addr_copy(veal->list[i].addr, f->macaddr);
			i++;
			f->add = false;
			/* don't overrun the buffer if count was clamped */
			if (i == count)
				break;
		}
	}
	adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_MAC_FILTER;
	i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS,
			   (u8 *)veal, len);
	kfree(veal);
}

/**
 * i40evf_del_ether_addrs
 * @adapter: adapter structure
 *
 * Request that the PF remove one or more addresses from our filters.
 **/
void i40evf_del_ether_addrs(struct i40evf_adapter *adapter)
{
	struct i40e_virtchnl_ether_addr_list *veal;
	struct i40evf_mac_filter *f, *ftmp;
	int len, i = 0, count = 0;

	if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "%s: command %d pending\n",
			__func__, adapter->current_op);
		return;
	}
	list_for_each_entry(f, &adapter->mac_filter_list, list) {
		if (f->remove)
			count++;
	}
	if (!count) {
		adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_MAC_FILTER;
		return;
	}
	adapter->current_op = I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS;

	len = sizeof(struct i40e_virtchnl_ether_addr_list) +
	      (count * sizeof(struct i40e_virtchnl_ether_addr));
	if (len > I40EVF_MAX_AQ_BUF_SIZE) {
		dev_warn(&adapter->pdev->dev, "%s: Too many MAC address changes in one request\n",
			 __func__);
		count = (I40EVF_MAX_AQ_BUF_SIZE -
			 sizeof(struct i40e_virtchnl_ether_addr_list)) /
			sizeof(struct i40e_virtchnl_ether_addr);
		len = I40EVF_MAX_AQ_BUF_SIZE;
	}
	veal = kzalloc(len, GFP_ATOMIC);
	if (!veal)
		return;

	veal->vsi_id = adapter->vsi_res->vsi_id;
	veal->num_elements = count;
	list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
		if (f->remove) {
			ether_addr_copy(veal->list[i].addr, f->macaddr);
			i++;
			list_del(&f->list);
			kfree(f);
			/* don't overrun the buffer if count was clamped */
			if (i == count)
				break;
		}
	}
	adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_MAC_FILTER;
	i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS,
			   (u8 *)veal, len);
	kfree(veal);
}

/**
 * i40evf_add_vlans
 * @adapter: adapter structure
 *
 * Request that the PF add one or more VLAN filters to our VSI.
 **/
void i40evf_add_vlans(struct i40evf_adapter *adapter)
{
	struct i40e_virtchnl_vlan_filter_list *vvfl;
	int len, i = 0, count = 0;
	struct i40evf_vlan_filter *f;

	if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "%s: command %d pending\n",
			__func__, adapter->current_op);
		return;
	}

	list_for_each_entry(f, &adapter->vlan_filter_list, list) {
		if (f->add)
			count++;
	}
	if (!count) {
		adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_VLAN_FILTER;
		return;
	}
	adapter->current_op = I40E_VIRTCHNL_OP_ADD_VLAN;

	len = sizeof(struct i40e_virtchnl_vlan_filter_list) +
	      (count * sizeof(u16));
	if (len > I40EVF_MAX_AQ_BUF_SIZE) {
		dev_warn(&adapter->pdev->dev, "%s: Too many VLAN changes in one request\n",
			 __func__);
		count = (I40EVF_MAX_AQ_BUF_SIZE -
			 sizeof(struct i40e_virtchnl_vlan_filter_list)) /
			sizeof(u16);
		len = I40EVF_MAX_AQ_BUF_SIZE;
	}
	vvfl = kzalloc(len, GFP_ATOMIC);
	if (!vvfl)
		return;

	vvfl->vsi_id = adapter->vsi_res->vsi_id;
	vvfl->num_elements = count;
	list_for_each_entry(f, &adapter->vlan_filter_list, list) {
		if (f->add) {
			vvfl->vlan_id[i] = f->vlan;
			i++;
			f->add = false;
			/* don't overrun the buffer if count was clamped */
			if (i == count)
				break;
		}
	}
	adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_VLAN_FILTER;
	i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_ADD_VLAN, (u8 *)vvfl, len);
	kfree(vvfl);
}

/**
 * i40evf_del_vlans
 * @adapter: adapter structure
 *
 * Request that the PF remove one or more VLAN filters from our VSI.
 **/
void i40evf_del_vlans(struct i40evf_adapter *adapter)
{
	struct i40e_virtchnl_vlan_filter_list *vvfl;
	struct i40evf_vlan_filter *f, *ftmp;
	int len, i = 0, count = 0;

	if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "%s: command %d pending\n",
			__func__, adapter->current_op);
		return;
	}

	list_for_each_entry(f, &adapter->vlan_filter_list, list) {
		if (f->remove)
			count++;
	}
	if (!count) {
		adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_VLAN_FILTER;
		return;
	}
	adapter->current_op = I40E_VIRTCHNL_OP_DEL_VLAN;

	len = sizeof(struct i40e_virtchnl_vlan_filter_list) +
	      (count * sizeof(u16));
	if (len > I40EVF_MAX_AQ_BUF_SIZE) {
		dev_warn(&adapter->pdev->dev, "%s: Too many VLAN changes in one request\n",
			 __func__);
		count = (I40EVF_MAX_AQ_BUF_SIZE -
			 sizeof(struct i40e_virtchnl_vlan_filter_list)) /
			sizeof(u16);
		len = I40EVF_MAX_AQ_BUF_SIZE;
	}
	vvfl = kzalloc(len, GFP_ATOMIC);
	if (!vvfl)
		return;

	vvfl->vsi_id = adapter->vsi_res->vsi_id;
	vvfl->num_elements = count;
	list_for_each_entry_safe(f, ftmp, &adapter->vlan_filter_list, list) {
		if (f->remove) {
			vvfl->vlan_id[i] = f->vlan;
			i++;
			list_del(&f->list);
			kfree(f);
			/* don't overrun the buffer if count was clamped */
			if (i == count)
				break;
		}
	}
	adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_VLAN_FILTER;
	i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_DEL_VLAN, (u8 *)vvfl, len);
	kfree(vvfl);
}

/**
 * i40evf_set_promiscuous
 * @adapter: adapter structure
 * @flags: bitmask to control unicast/multicast promiscuous.
 *
 * Request that the PF enable promiscuous mode for our VSI.
 **/
void i40evf_set_promiscuous(struct i40evf_adapter *adapter, int flags)
{
	struct i40e_virtchnl_promisc_info vpi;

	if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "%s: command %d pending\n",
			__func__, adapter->current_op);
		return;
	}
	adapter->current_op = I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE;
	vpi.vsi_id = adapter->vsi_res->vsi_id;
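	/* flags is expected to be a mask of the I40E_FLAG_VF_*_PROMISC bits
	 * defined in i40e_virtchnl.h
	 */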
	vpi.flags = flags;
	i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
			   (u8 *)&vpi, sizeof(vpi));
}

/**
 * i40evf_request_stats
 * @adapter: adapter structure
 *
 * Request VSI statistics from PF.
 **/
void i40evf_request_stats(struct i40evf_adapter *adapter)
{
	struct i40e_virtchnl_queue_select vqs;

	if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
		/* no error message, this isn't crucial */
		return;
	}
	adapter->current_op = I40E_VIRTCHNL_OP_GET_STATS;
	vqs.vsi_id = adapter->vsi_res->vsi_id;
	/* queue maps are ignored for this message - only the vsi is used */
	if (i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_GET_STATS,
			       (u8 *)&vqs, sizeof(vqs)))
		/* if the request failed, don't lock out others */
		adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
}

/**
 * i40evf_request_reset
 * @adapter: adapter structure
 *
 * Request that the PF reset this VF. No response is expected.
 **/
void i40evf_request_reset(struct i40evf_adapter *adapter)
{
	/* Don't check CURRENT_OP - this is always higher priority */
	i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_RESET_VF, NULL, 0);
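	/* clear any pending op so subsequent requests are not blocked */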
	adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
}

/**
 * i40evf_virtchnl_completion
 * @adapter: adapter structure
 * @v_opcode: opcode sent by PF
 * @v_retval: retval sent by PF
 * @msg: message sent by PF
 * @msglen: message length
 *
 * Asynchronous completion function for admin queue messages. Rather than busy
 * wait, we fire off our requests and assume that no errors will be returned.
 * This function handles the reply messages.
 **/
void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
				enum i40e_virtchnl_ops v_opcode,
				i40e_status v_retval,
				u8 *msg, u16 msglen)
{
	struct net_device *netdev = adapter->netdev;

	if (v_opcode == I40E_VIRTCHNL_OP_EVENT) {
		struct i40e_virtchnl_pf_event *vpe =
			(struct i40e_virtchnl_pf_event *)msg;

		switch (vpe->event) {
		case I40E_VIRTCHNL_EVENT_LINK_CHANGE:
			adapter->link_up =
				vpe->event_data.link_event.link_status;
			if (adapter->link_up && !netif_carrier_ok(netdev)) {
				dev_info(&adapter->pdev->dev, "NIC Link is Up\n");
				netif_carrier_on(netdev);
				netif_tx_wake_all_queues(netdev);
			} else if (!adapter->link_up) {
				dev_info(&adapter->pdev->dev, "NIC Link is Down\n");
				netif_carrier_off(netdev);
				netif_tx_stop_all_queues(netdev);
			}
			break;
		case I40E_VIRTCHNL_EVENT_RESET_IMPENDING:
			dev_info(&adapter->pdev->dev, "PF reset warning received\n");
			if (!(adapter->flags & I40EVF_FLAG_RESET_PENDING)) {
				adapter->flags |= I40EVF_FLAG_RESET_PENDING;
				dev_info(&adapter->pdev->dev, "Scheduling reset task\n");
				schedule_work(&adapter->reset_task);
			}
			break;
		default:
			dev_err(&adapter->pdev->dev,
				"%s: Unknown event %d from pf\n",
				__func__, vpe->event);
			break;
		}
		return;
	}
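	/* a nonzero v_retval means the PF rejected our previous request */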
	if (v_retval) {
		dev_err(&adapter->pdev->dev, "%s: PF returned error %d to our request %d\n",
			__func__, v_retval, v_opcode);
	}
	switch (v_opcode) {
	case I40E_VIRTCHNL_OP_GET_STATS: {
		struct i40e_eth_stats *stats =
			(struct i40e_eth_stats *)msg;

		adapter->net_stats.rx_packets = stats->rx_unicast +
						stats->rx_multicast +
						stats->rx_broadcast;
		adapter->net_stats.tx_packets = stats->tx_unicast +
						stats->tx_multicast +
						stats->tx_broadcast;
		adapter->net_stats.rx_bytes = stats->rx_bytes;
		adapter->net_stats.tx_bytes = stats->tx_bytes;
		adapter->net_stats.tx_errors = stats->tx_errors;
		adapter->net_stats.rx_dropped = stats->rx_discards;
		adapter->net_stats.tx_dropped = stats->tx_discards;
		adapter->current_stats = *stats;
		}
		break;
	case I40E_VIRTCHNL_OP_ENABLE_QUEUES:
		/* enable transmits */
		i40evf_irq_enable(adapter, true);
		netif_tx_start_all_queues(adapter->netdev);
		netif_carrier_on(adapter->netdev);
		break;
	case I40E_VIRTCHNL_OP_DISABLE_QUEUES:
		i40evf_free_all_tx_resources(adapter);
		i40evf_free_all_rx_resources(adapter);
		break;
	case I40E_VIRTCHNL_OP_VERSION:
	case I40E_VIRTCHNL_OP_GET_VF_RESOURCES:
	case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP:
		/* Don't display an error if we get these out of sequence.
		 * If the firmware needed to get kicked, we'll get these and
		 * it's no problem.
		 */
		if (v_opcode != adapter->current_op)
			return;
		break;
	default:
		if (v_opcode != adapter->current_op)
			dev_warn(&adapter->pdev->dev, "Expected response %d from PF, received %d\n",
				 adapter->current_op, v_opcode);
		break;
	} /* switch v_opcode */
	adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
}