This source file includes the following definitions:
- ice_err_to_virt_err
- ice_vc_vf_broadcast
- ice_set_pfe_link
- ice_set_pfe_link_forced
- ice_vc_notify_vf_link_state
- ice_free_vf_res
- ice_dis_vf_mappings
- ice_sriov_free_msix_res
- ice_set_vf_state_qs_dis
- ice_dis_vf_qs
- ice_free_vfs
- ice_trigger_vf_reset
- ice_vsi_set_pvid_fill_ctxt
- ice_vsi_kill_pvid_fill_ctxt
- ice_vsi_manage_pvid
- ice_vf_vsi_setup
- ice_calc_vf_first_vector_idx
- ice_alloc_vsi_res
- ice_alloc_vf_res
- ice_ena_vf_mappings
- ice_determine_res
- ice_calc_vf_reg_idx
- ice_get_max_valid_res_idx
- ice_sriov_set_msix_res
- ice_check_avail_res
- ice_cleanup_and_realloc_vf
- ice_vf_set_vsi_promisc
- ice_config_res_vfs
- ice_reset_all_vfs
- ice_reset_vf
- ice_vc_notify_link_state
- ice_vc_notify_reset
- ice_vc_notify_vf_reset
- ice_alloc_vfs
- ice_pf_state_is_nominal
- ice_pci_sriov_ena
- ice_sriov_configure
- ice_process_vflr_event
- ice_vc_dis_vf
- ice_vc_send_msg_to_vf
- ice_vc_get_ver_msg
- ice_vc_get_vf_res_msg
- ice_vc_reset_vf_msg
- ice_find_vsi_from_id
- ice_vc_isvalid_vsi_id
- ice_vc_isvalid_q_id
- ice_vc_isvalid_ring_len
- ice_vc_config_rss_key
- ice_vc_config_rss_lut
- ice_vc_get_stats_msg
- ice_vc_ena_qs_msg
- ice_vc_dis_qs_msg
- ice_vc_cfg_irq_map_msg
- ice_vc_cfg_qs_msg
- ice_is_vf_trusted
- ice_can_vf_change_mac
- ice_vc_handle_mac_addr_msg
- ice_vc_add_mac_addr_msg
- ice_vc_del_mac_addr_msg
- ice_vc_request_qs_msg
- ice_set_vf_port_vlan
- ice_vc_process_vlan_msg
- ice_vc_add_vlan_msg
- ice_vc_remove_vlan_msg
- ice_vc_ena_vlan_stripping
- ice_vc_dis_vlan_stripping
- ice_vc_process_vf_msg
- ice_get_vf_cfg
- ice_set_vf_spoofchk
- ice_set_vf_mac
- ice_set_vf_trust
- ice_set_vf_link_state
1
2
3
4 #include "ice.h"
5 #include "ice_lib.h"
6
7
8
9
10
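/**
 * ice_err_to_virt_err - translate an ice_status error into a virtchnl status
 * @ice_err: error return code from the ice driver/firmware layer
 */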
11 static enum virtchnl_status_code ice_err_to_virt_err(enum ice_status ice_err)
12 {
13 switch (ice_err) {
14 case ICE_SUCCESS:
15 return VIRTCHNL_STATUS_SUCCESS;
16 case ICE_ERR_BAD_PTR:
17 case ICE_ERR_INVAL_SIZE:
18 case ICE_ERR_DEVICE_NOT_SUPPORTED:
19 case ICE_ERR_PARAM:
20 case ICE_ERR_CFG:
21 return VIRTCHNL_STATUS_ERR_PARAM;
22 case ICE_ERR_NO_MEMORY:
23 return VIRTCHNL_STATUS_ERR_NO_MEMORY;
24 case ICE_ERR_NOT_READY:
25 case ICE_ERR_RESET_FAILED:
26 case ICE_ERR_FW_API_VER:
27 case ICE_ERR_AQ_ERROR:
28 case ICE_ERR_AQ_TIMEOUT:
29 case ICE_ERR_AQ_FULL:
30 case ICE_ERR_AQ_NO_WORK:
31 case ICE_ERR_AQ_EMPTY:
32 return VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
33 default:
34 return VIRTCHNL_STATUS_ERR_NOT_SUPPORTED;
35 }
36 }
37
38
39
40
41
42
43
44
45
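/**
 * ice_vc_vf_broadcast - Broadcast a message to all VFs on the PF
 * @pf: pointer to the PF structure
 * @v_opcode: operation code
 * @v_retval: return value
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 */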
46 static void
47 ice_vc_vf_broadcast(struct ice_pf *pf, enum virtchnl_ops v_opcode,
48 enum virtchnl_status_code v_retval, u8 *msg, u16 msglen)
49 {
50 struct ice_hw *hw = &pf->hw;
51 struct ice_vf *vf = pf->vf;
52 int i;
53
54 for (i = 0; i < pf->num_alloc_vfs; i++, vf++) {
55
56 if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states) &&
57 !test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
58 continue;
59
60
61
62
63 ice_aq_send_msg_to_vf(hw, vf->vf_id, v_opcode, v_retval, msg,
64 msglen, NULL);
65 }
66 }
67
68
69
70
71
72
73
74
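/**
 * ice_set_pfe_link - Set the link speed/status of the virtchnl_pf_event
 * @vf: pointer to the VF structure
 * @pfe: pointer to the virtchnl_pf_event to fill in
 * @ice_link_speed: link speed specified by ICE_AQ_LINK_SPEED_*
 * @link_up: whether the link is up or down
 */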
75 static void
76 ice_set_pfe_link(struct ice_vf *vf, struct virtchnl_pf_event *pfe,
77 int ice_link_speed, bool link_up)
78 {
79 if (vf->driver_caps & VIRTCHNL_VF_CAP_ADV_LINK_SPEED) {
80 pfe->event_data.link_event_adv.link_status = link_up;
81
82 pfe->event_data.link_event_adv.link_speed =
83 ice_conv_link_speed_to_virtchnl(true, ice_link_speed);
84 } else {
85 pfe->event_data.link_event.link_status = link_up;
86
87 pfe->event_data.link_event.link_speed =
88 (enum virtchnl_link_speed)
89 ice_conv_link_speed_to_virtchnl(false, ice_link_speed);
90 }
91 }
92
93
94
95
96
97
98
99 static void
100 ice_set_pfe_link_forced(struct ice_vf *vf, struct virtchnl_pf_event *pfe,
101 bool link_up)
102 {
103 u16 link_speed;
104
105 if (link_up)
106 link_speed = ICE_AQ_LINK_SPEED_100GB;
107 else
108 link_speed = ICE_AQ_LINK_SPEED_UNKNOWN;
109
110 ice_set_pfe_link(vf, pfe, link_speed, link_up);
111 }
112
113
114
115
116
117
118
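/**
 * ice_vc_notify_vf_link_state - send the current link status to a single VF
 * @vf: pointer to the VF structure
 */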
119 static void ice_vc_notify_vf_link_state(struct ice_vf *vf)
120 {
121 struct virtchnl_pf_event pfe = { 0 };
122 struct ice_link_status *ls;
123 struct ice_pf *pf = vf->pf;
124 struct ice_hw *hw;
125
126 hw = &pf->hw;
127 ls = &hw->port_info->phy.link_info;
128
129 pfe.event = VIRTCHNL_EVENT_LINK_CHANGE;
130 pfe.severity = PF_EVENT_SEVERITY_INFO;
131
132
133 if (!vf->num_qs_ena)
134 ice_set_pfe_link(vf, &pfe, ICE_AQ_LINK_SPEED_UNKNOWN, false);
135 else if (vf->link_forced)
136 ice_set_pfe_link_forced(vf, &pfe, vf->link_up);
137 else
138 ice_set_pfe_link(vf, &pfe, ls->link_speed, ls->link_info &
139 ICE_AQ_LINK_UP);
140
141 ice_aq_send_msg_to_vf(hw, vf->vf_id, VIRTCHNL_OP_EVENT,
142 VIRTCHNL_STATUS_SUCCESS, (u8 *)&pfe,
143 sizeof(pfe), NULL);
144 }
145
146
147
148
149
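/**
 * ice_free_vf_res - Free a VF's VSI, interrupt and filter resources
 * @vf: pointer to the VF info
 */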
150 static void ice_free_vf_res(struct ice_vf *vf)
151 {
152 struct ice_pf *pf = vf->pf;
153 int i, last_vector_idx;
154
155
156
157
158 clear_bit(ICE_VF_STATE_INIT, vf->vf_states);
159
160
161 if (vf->lan_vsi_idx) {
162 ice_vsi_release(pf->vsi[vf->lan_vsi_idx]);
163 vf->lan_vsi_idx = 0;
164 vf->lan_vsi_num = 0;
165 vf->num_mac = 0;
166 }
167
168 last_vector_idx = vf->first_vector_idx + pf->num_vf_msix - 1;
169
170 for (i = vf->first_vector_idx; i <= last_vector_idx; i++) {
171 wr32(&pf->hw, GLINT_DYN_CTL(i), GLINT_DYN_CTL_CLEARPBA_M);
172 ice_flush(&pf->hw);
173 }
174
175 clear_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states);
176 clear_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states);
177 }
178
179
180
181
182
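/**
 * ice_dis_vf_mappings - disable a VF's MSI-X and queue register mappings
 * @vf: pointer to the VF structure
 */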
183 static void ice_dis_vf_mappings(struct ice_vf *vf)
184 {
185 struct ice_pf *pf = vf->pf;
186 struct ice_vsi *vsi;
187 int first, last, v;
188 struct ice_hw *hw;
189
190 hw = &pf->hw;
191 vsi = pf->vsi[vf->lan_vsi_idx];
192
193 wr32(hw, VPINT_ALLOC(vf->vf_id), 0);
194 wr32(hw, VPINT_ALLOC_PCI(vf->vf_id), 0);
195
196 first = vf->first_vector_idx;
197 last = first + pf->num_vf_msix - 1;
198 for (v = first; v <= last; v++) {
199 u32 reg;
200
201 reg = (((1 << GLINT_VECT2FUNC_IS_PF_S) &
202 GLINT_VECT2FUNC_IS_PF_M) |
203 ((hw->pf_id << GLINT_VECT2FUNC_PF_NUM_S) &
204 GLINT_VECT2FUNC_PF_NUM_M));
205 wr32(hw, GLINT_VECT2FUNC(v), reg);
206 }
207
208 if (vsi->tx_mapping_mode == ICE_VSI_MAP_CONTIG)
209 wr32(hw, VPLAN_TX_QBASE(vf->vf_id), 0);
210 else
211 dev_err(&pf->pdev->dev,
212 "Scattered mode for VF Tx queues is not yet implemented\n");
213
214 if (vsi->rx_mapping_mode == ICE_VSI_MAP_CONTIG)
215 wr32(hw, VPLAN_RX_QBASE(vf->vf_id), 0);
216 else
217 dev_err(&pf->pdev->dev,
218 "Scattered mode for VF Rx queues is not yet implemented\n");
219 }
220
221
222
223
224
225
226
227
228
229
230
231
232
233
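/**
 * ice_sriov_free_msix_res - give back the MSI-X vectors used by SR-IOV
 * @pf: pointer to the PF structure
 *
 * If the SR-IOV base vector was carved out of the irq_tracker, restore
 * irq_tracker->end and num_avail_sw_msix. Returns 0 on success, -EINVAL
 * if @pf or its irq_tracker is NULL.
 */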
234 static int ice_sriov_free_msix_res(struct ice_pf *pf)
235 {
236 struct ice_res_tracker *res;
237
238 if (!pf)
239 return -EINVAL;
240
241 res = pf->irq_tracker;
242 if (!res)
243 return -EINVAL;
244
245
246 if (pf->sriov_base_vector < res->num_entries) {
247 res->end = res->num_entries;
248 pf->num_avail_sw_msix +=
249 res->num_entries - pf->sriov_base_vector;
250 }
251
252 pf->sriov_base_vector = 0;
253
254 return 0;
255 }
256
257
258
259
260
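/**
 * ice_set_vf_state_qs_dis - mark all VF queues as disabled in software state
 * @vf: pointer to the VF structure
 */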
261 void ice_set_vf_state_qs_dis(struct ice_vf *vf)
262 {
263
264 bitmap_zero(vf->txq_ena, ICE_MAX_BASE_QS_PER_VF);
265 bitmap_zero(vf->rxq_ena, ICE_MAX_BASE_QS_PER_VF);
266 vf->num_qs_ena = 0;
267 clear_bit(ICE_VF_STATE_QS_ENA, vf->vf_states);
268 }
269
270
271
272
273
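/**
 * ice_dis_vf_qs - stop the VF's Tx and Rx rings and clear queue state
 * @vf: pointer to the VF structure
 */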
274 static void ice_dis_vf_qs(struct ice_vf *vf)
275 {
276 struct ice_pf *pf = vf->pf;
277 struct ice_vsi *vsi;
278
279 vsi = pf->vsi[vf->lan_vsi_idx];
280
281 ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, vf->vf_id);
282 ice_vsi_stop_rx_rings(vsi);
283 ice_set_vf_state_qs_dis(vf);
284 }
285
286
287
288
289
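/**
 * ice_free_vfs - Free memory and resources allocated for all VFs
 * @pf: pointer to the PF structure
 */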
290 void ice_free_vfs(struct ice_pf *pf)
291 {
292 struct ice_hw *hw = &pf->hw;
293 int tmp, i;
294
295 if (!pf->vf)
296 return;
297
298 while (test_and_set_bit(__ICE_VF_DIS, pf->state))
299 usleep_range(1000, 2000);
300
301
302 for (i = 0; i < pf->num_alloc_vfs; i++)
303 if (test_bit(ICE_VF_STATE_QS_ENA, pf->vf[i].vf_states))
304 ice_dis_vf_qs(&pf->vf[i]);
305
306
307
308
309
310 if (!pci_vfs_assigned(pf->pdev))
311 pci_disable_sriov(pf->pdev);
312 else
313 dev_warn(&pf->pdev->dev, "VFs are assigned - not disabling SR-IOV\n");
314
315 tmp = pf->num_alloc_vfs;
316 pf->num_vf_qps = 0;
317 pf->num_alloc_vfs = 0;
318 for (i = 0; i < tmp; i++) {
319 if (test_bit(ICE_VF_STATE_INIT, pf->vf[i].vf_states)) {
320
321 ice_dis_vf_mappings(&pf->vf[i]);
322 set_bit(ICE_VF_STATE_DIS, pf->vf[i].vf_states);
323 ice_free_vf_res(&pf->vf[i]);
324 }
325 }
326
327 if (ice_sriov_free_msix_res(pf))
328 dev_err(&pf->pdev->dev,
329 "Failed to free MSIX resources used by SR-IOV\n");
330
331 devm_kfree(&pf->pdev->dev, pf->vf);
332 pf->vf = NULL;
333
334
335
336
337
338 if (!pci_vfs_assigned(pf->pdev)) {
339 int vf_id;
340
341
342
343
344 for (vf_id = 0; vf_id < tmp; vf_id++) {
345 u32 reg_idx, bit_idx;
346
347 reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
348 bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
349 wr32(hw, GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
350 }
351 }
352 clear_bit(__ICE_VF_DIS, pf->state);
353 clear_bit(ICE_FLAG_SRIOV_ENA, pf->flags);
354 }
355
356
357
358
359
360
361
362
363
364
365
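/**
 * ice_trigger_vf_reset - start a hardware reset for a particular VF
 * @vf: pointer to the VF structure
 * @is_vflr: true if VFLR was issued, false if not
 * @is_pfr: true if the reset was triggered due to a previous PFR
 *
 * The caller is expected to wait for the hardware to finish the reset
 * before cleaning up and restoring VF functionality.
 */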
366 static void ice_trigger_vf_reset(struct ice_vf *vf, bool is_vflr, bool is_pfr)
367 {
368 struct ice_pf *pf = vf->pf;
369 u32 reg, reg_idx, bit_idx;
370 struct ice_hw *hw;
371 int vf_abs_id, i;
372
373 hw = &pf->hw;
374 vf_abs_id = vf->vf_id + hw->func_caps.vf_base_id;
375
376
377 clear_bit(ICE_VF_STATE_ACTIVE, vf->vf_states);
378
379
380
381
382
383
384
385 clear_bit(ICE_VF_STATE_INIT, vf->vf_states);
386
387
388
389
390
391
392 if (!is_pfr)
393 wr32(hw, VF_MBX_ARQLEN(vf_abs_id), 0);
394
395
396
397
398 if (!is_vflr) {
399
400 reg = rd32(hw, VPGEN_VFRTRIG(vf->vf_id));
401 reg |= VPGEN_VFRTRIG_VFSWR_M;
402 wr32(hw, VPGEN_VFRTRIG(vf->vf_id), reg);
403 }
404
405 reg_idx = (vf_abs_id) / 32;
406 bit_idx = (vf_abs_id) % 32;
407 wr32(hw, GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
408 ice_flush(hw);
409
410 wr32(hw, PF_PCI_CIAA,
411 VF_DEVICE_STATUS | (vf_abs_id << PF_PCI_CIAA_VF_NUM_S));
412 for (i = 0; i < ICE_PCI_CIAD_WAIT_COUNT; i++) {
413 reg = rd32(hw, PF_PCI_CIAD);
414
415 if ((reg & VF_TRANS_PENDING_M) == 0)
416 break;
417
418 dev_err(&pf->pdev->dev,
419 "VF %d PCI transactions stuck\n", vf->vf_id);
420 udelay(ICE_PCI_CIAD_WAIT_DELAY_US);
421 }
422 }
423
424
425
426
427
428
429 static void ice_vsi_set_pvid_fill_ctxt(struct ice_vsi_ctx *ctxt, u16 vid)
430 {
431 ctxt->info.vlan_flags = (ICE_AQ_VSI_VLAN_MODE_UNTAGGED |
432 ICE_AQ_VSI_PVLAN_INSERT_PVID |
433 ICE_AQ_VSI_VLAN_EMOD_STR);
434 ctxt->info.pvid = cpu_to_le16(vid);
435 ctxt->info.sw_flags2 |= ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
436 ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID |
437 ICE_AQ_VSI_PROP_SW_VALID);
438 }
439
440
441
442
443
444 static void ice_vsi_kill_pvid_fill_ctxt(struct ice_vsi_ctx *ctxt)
445 {
446 ctxt->info.vlan_flags = ICE_AQ_VSI_VLAN_EMOD_NOTHING;
447 ctxt->info.vlan_flags |= ICE_AQ_VSI_VLAN_MODE_ALL;
448 ctxt->info.sw_flags2 &= ~ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
449 ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID |
450 ICE_AQ_VSI_PROP_SW_VALID);
451 }
452
453
454
455
456
457
458
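/**
 * ice_vsi_manage_pvid - Enable or disable the port VLAN (PVID) on a VSI
 * @vsi: the VSI to update
 * @vid: the VLAN ID to set as the PVID
 * @enable: true to set the PVID, false to remove it
 *
 * Returns 0 on success, negative error code on failure.
 */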
459 static int ice_vsi_manage_pvid(struct ice_vsi *vsi, u16 vid, bool enable)
460 {
461 struct device *dev = &vsi->back->pdev->dev;
462 struct ice_hw *hw = &vsi->back->hw;
463 struct ice_vsi_ctx *ctxt;
464 enum ice_status status;
465 int ret = 0;
466
467 ctxt = devm_kzalloc(dev, sizeof(*ctxt), GFP_KERNEL);
468 if (!ctxt)
469 return -ENOMEM;
470
471 ctxt->info = vsi->info;
472 if (enable)
473 ice_vsi_set_pvid_fill_ctxt(ctxt, vid);
474 else
475 ice_vsi_kill_pvid_fill_ctxt(ctxt);
476
477 status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
478 if (status) {
479 dev_info(dev, "update VSI for port VLAN failed, err %d aq_err %d\n",
480 status, hw->adminq.sq_last_status);
481 ret = -EIO;
482 goto out;
483 }
484
485 vsi->info = ctxt->info;
486 out:
487 devm_kfree(dev, ctxt);
488 return ret;
489 }
490
491
492
493
494
495
496
497
498
499
500 static struct ice_vsi *
501 ice_vf_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi, u16 vf_id)
502 {
503 return ice_vsi_setup(pf, pi, ICE_VSI_VF, vf_id);
504 }
505
506
507
508
509
510
511
512
513
514
515
516
517
518 static int ice_calc_vf_first_vector_idx(struct ice_pf *pf, struct ice_vf *vf)
519 {
520 return pf->sriov_base_vector + vf->vf_id * pf->num_vf_msix;
521 }
522
523
524
525
526
527
528
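/**
 * ice_alloc_vsi_res - set up the VF's VSI, port VLAN and default MAC filters
 * @vf: pointer to the VF structure
 *
 * Returns 0 on success, negative value on failure.
 */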
529 static int ice_alloc_vsi_res(struct ice_vf *vf)
530 {
531 struct ice_pf *pf = vf->pf;
532 LIST_HEAD(tmp_add_list);
533 u8 broadcast[ETH_ALEN];
534 struct ice_vsi *vsi;
535 int status = 0;
536
537
538 vf->first_vector_idx = ice_calc_vf_first_vector_idx(pf, vf);
539
540 vsi = ice_vf_vsi_setup(pf, pf->hw.port_info, vf->vf_id);
541 if (!vsi) {
542 dev_err(&pf->pdev->dev, "Failed to create VF VSI\n");
543 return -ENOMEM;
544 }
545
546 vf->lan_vsi_idx = vsi->idx;
547 vf->lan_vsi_num = vsi->vsi_num;
548
549
550 if (vf->port_vlan_id) {
551 ice_vsi_manage_pvid(vsi, vf->port_vlan_id, true);
552 ice_vsi_add_vlan(vsi, vf->port_vlan_id & ICE_VLAN_M);
553 }
554
555 eth_broadcast_addr(broadcast);
556
557 status = ice_add_mac_to_list(vsi, &tmp_add_list, broadcast);
558 if (status)
559 goto ice_alloc_vsi_res_exit;
560
561 if (is_valid_ether_addr(vf->dflt_lan_addr.addr)) {
562 status = ice_add_mac_to_list(vsi, &tmp_add_list,
563 vf->dflt_lan_addr.addr);
564 if (status)
565 goto ice_alloc_vsi_res_exit;
566 }
567
568 status = ice_add_mac(&pf->hw, &tmp_add_list);
569 if (status)
570 dev_err(&pf->pdev->dev,
571 "could not add mac filters error %d\n", status);
572 else
573 vf->num_mac = 1;
574
575
576
577
578
579
580
581 ice_alloc_vsi_res_exit:
582 ice_free_fltr_list(&pf->pdev->dev, &tmp_add_list);
583 return status;
584 }
585
586
587
588
589
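/**
 * ice_alloc_vf_res - allocate queue, VSI and capability resources for a VF
 * @vf: pointer to the VF structure
 */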
590 static int ice_alloc_vf_res(struct ice_vf *vf)
591 {
592 struct ice_pf *pf = vf->pf;
593 int tx_rx_queue_left;
594 int status;
595
596
597
598
599 tx_rx_queue_left = min_t(int, ice_get_avail_txq_count(pf),
600 ice_get_avail_rxq_count(pf));
601 tx_rx_queue_left += ICE_DFLT_QS_PER_VF;
602 if (vf->num_req_qs && vf->num_req_qs <= tx_rx_queue_left &&
603 vf->num_req_qs != vf->num_vf_qs)
604 vf->num_vf_qs = vf->num_req_qs;
605
606
607 status = ice_alloc_vsi_res(vf);
608 if (status)
609 goto ice_alloc_vf_res_exit;
610
611 if (vf->trusted)
612 set_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
613 else
614 clear_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
615
616
617 set_bit(ICE_VF_STATE_INIT, vf->vf_states);
618
619 return status;
620
621 ice_alloc_vf_res_exit:
622 ice_free_vf_res(vf);
623 return status;
624 }
625
626
627
628
629
630
631
632
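/**
 * ice_ena_vf_mappings - program the VF's MSI-X vector and queue mappings
 * @vf: pointer to the VF structure
 */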
633 static void ice_ena_vf_mappings(struct ice_vf *vf)
634 {
635 int abs_vf_id, abs_first, abs_last;
636 struct ice_pf *pf = vf->pf;
637 struct ice_vsi *vsi;
638 int first, last, v;
639 struct ice_hw *hw;
640 u32 reg;
641
642 hw = &pf->hw;
643 vsi = pf->vsi[vf->lan_vsi_idx];
644 first = vf->first_vector_idx;
645 last = (first + pf->num_vf_msix) - 1;
646 abs_first = first + pf->hw.func_caps.common_cap.msix_vector_first_id;
647 abs_last = (abs_first + pf->num_vf_msix) - 1;
648 abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
649
650
651 reg = (((abs_first << VPINT_ALLOC_FIRST_S) & VPINT_ALLOC_FIRST_M) |
652 ((abs_last << VPINT_ALLOC_LAST_S) & VPINT_ALLOC_LAST_M) |
653 VPINT_ALLOC_VALID_M);
654 wr32(hw, VPINT_ALLOC(vf->vf_id), reg);
655
656 reg = (((abs_first << VPINT_ALLOC_PCI_FIRST_S)
657 & VPINT_ALLOC_PCI_FIRST_M) |
658 ((abs_last << VPINT_ALLOC_PCI_LAST_S) & VPINT_ALLOC_PCI_LAST_M) |
659 VPINT_ALLOC_PCI_VALID_M);
660 wr32(hw, VPINT_ALLOC_PCI(vf->vf_id), reg);
661
662 for (v = first; v <= last; v++) {
663 reg = (((abs_vf_id << GLINT_VECT2FUNC_VF_NUM_S) &
664 GLINT_VECT2FUNC_VF_NUM_M) |
665 ((hw->pf_id << GLINT_VECT2FUNC_PF_NUM_S) &
666 GLINT_VECT2FUNC_PF_NUM_M));
667 wr32(hw, GLINT_VECT2FUNC(v), reg);
668 }
669
670
671
672
673 wr32(hw, VPINT_MBX_CTL(abs_vf_id), VPINT_MBX_CTL_CAUSE_ENA_M | 0);
674
675 wr32(hw, VPLAN_TXQ_MAPENA(vf->vf_id), VPLAN_TXQ_MAPENA_TX_ENA_M);
676
677
678 if (vsi->tx_mapping_mode == ICE_VSI_MAP_CONTIG) {
679
680
681
682
683 reg = (((vsi->txq_map[0] << VPLAN_TX_QBASE_VFFIRSTQ_S) &
684 VPLAN_TX_QBASE_VFFIRSTQ_M) |
685 (((vsi->alloc_txq - 1) << VPLAN_TX_QBASE_VFNUMQ_S) &
686 VPLAN_TX_QBASE_VFNUMQ_M));
687 wr32(hw, VPLAN_TX_QBASE(vf->vf_id), reg);
688 } else {
689 dev_err(&pf->pdev->dev,
690 "Scattered mode for VF Tx queues is not yet implemented\n");
691 }
692
693
694 wr32(hw, VPLAN_RXQ_MAPENA(vf->vf_id), VPLAN_RXQ_MAPENA_RX_ENA_M);
695
696
697 if (vsi->rx_mapping_mode == ICE_VSI_MAP_CONTIG) {
698
699
700
701
702 reg = (((vsi->rxq_map[0] << VPLAN_RX_QBASE_VFFIRSTQ_S) &
703 VPLAN_RX_QBASE_VFFIRSTQ_M) |
704 (((vsi->alloc_txq - 1) << VPLAN_RX_QBASE_VFNUMQ_S) &
705 VPLAN_RX_QBASE_VFNUMQ_M));
706 wr32(hw, VPLAN_RX_QBASE(vf->vf_id), reg);
707 } else {
708 dev_err(&pf->pdev->dev,
709 "Scattered mode for VF Rx queues is not yet implemented\n");
710 }
711 }
712
713
714
715
716
717
718
719
720
721
722
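/**
 * ice_determine_res - determine a per-VF resource count that fits
 * @pf: pointer to the PF structure
 * @avail_res: total resources available in the PF
 * @max_res: maximum resources supported per VF
 * @min_res: minimum resources supported per VF
 *
 * Starting at @max_res and halving (rounding up) down to @min_res, return the
 * first per-VF value whose total across all allocated VFs fits in @avail_res,
 * or 0 if even @min_res does not fit.
 */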
723 static int
724 ice_determine_res(struct ice_pf *pf, u16 avail_res, u16 max_res, u16 min_res)
725 {
726 bool checked_min_res = false;
727 int res;
728
729
730
731
732
733
734
735
736 res = max_res;
737 while ((res >= min_res) && !checked_min_res) {
738 int num_all_res;
739
740 num_all_res = pf->num_alloc_vfs * res;
741 if (num_all_res <= avail_res)
742 return res;
743
744 if (res == min_res)
745 checked_min_res = true;
746
747 res = DIV_ROUND_UP(res, 2);
748 }
749 return 0;
750 }
751
752
753
754
755
756
757 int ice_calc_vf_reg_idx(struct ice_vf *vf, struct ice_q_vector *q_vector)
758 {
759 struct ice_pf *pf;
760
761 if (!vf || !q_vector)
762 return -EINVAL;
763
764 pf = vf->pf;
765
766
767 return pf->sriov_base_vector + pf->num_vf_msix * vf->vf_id +
768 q_vector->v_idx + 1;
769 }
770
771
772
773
774
775
776
777
778
779
780 static int ice_get_max_valid_res_idx(struct ice_res_tracker *res)
781 {
782 int i;
783
784 if (!res)
785 return -EINVAL;
786
787 for (i = res->num_entries - 1; i >= 0; i--)
788 if (res->list[i] & ICE_RES_VALID_BIT)
789 return i;
790
791 return 0;
792 }
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
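/**
 * ice_sriov_set_msix_res - reserve MSI-X vectors for SR-IOV from the PF space
 * @pf: pointer to the PF structure
 * @num_msix_needed: total number of MSI-X vectors needed for all SR-IOV VFs
 *
 * SR-IOV vectors are taken from the end of the PF's allowed HW MSI-X range so
 * that in many cases the irq_tracker does not need to be shrunk. Returns 0 on
 * success, -EINVAL when the vectors would overlap entries already in use.
 */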
812 static int ice_sriov_set_msix_res(struct ice_pf *pf, u16 num_msix_needed)
813 {
814 int max_valid_res_idx = ice_get_max_valid_res_idx(pf->irq_tracker);
815 u16 pf_total_msix_vectors =
816 pf->hw.func_caps.common_cap.num_msix_vectors;
817 struct ice_res_tracker *res = pf->irq_tracker;
818 int sriov_base_vector;
819
820 if (max_valid_res_idx < 0)
821 return max_valid_res_idx;
822
823 sriov_base_vector = pf_total_msix_vectors - num_msix_needed;
824
825
826
827
828 if (sriov_base_vector <= max_valid_res_idx)
829 return -EINVAL;
830
831 pf->sriov_base_vector = sriov_base_vector;
832
833
834 if (num_msix_needed > (pf_total_msix_vectors - res->num_entries)) {
835 pf->num_avail_sw_msix -=
836 res->num_entries - pf->sriov_base_vector;
837 res->end = pf->sriov_base_vector;
838 }
839
840 return 0;
841 }
842
843
844
845
846
847
848
849
850
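/**
 * ice_check_avail_res - check if enough vectors and queues are available
 * @pf: pointer to the PF structure
 *
 * Calculate the actual per-VF MSI-X vector and queue counts based on what is
 * currently available; resources are not reserved ahead of time during probe.
 * Returns 0 if the resources are available, otherwise an error code.
 */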
851 static int ice_check_avail_res(struct ice_pf *pf)
852 {
853 int max_valid_res_idx = ice_get_max_valid_res_idx(pf->irq_tracker);
854 u16 num_msix, num_txq, num_rxq, num_avail_msix;
855
856 if (!pf->num_alloc_vfs || max_valid_res_idx < 0)
857 return -EINVAL;
858
859
860 num_avail_msix = pf->hw.func_caps.common_cap.num_msix_vectors -
861 (max_valid_res_idx + 1);
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878 if (pf->num_alloc_vfs <= 16) {
879 num_msix = ice_determine_res(pf, num_avail_msix,
880 ICE_MAX_INTR_PER_VF,
881 ICE_MIN_INTR_PER_VF);
882 } else if (pf->num_alloc_vfs <= ICE_MAX_VF_COUNT) {
883 num_msix = ice_determine_res(pf, num_avail_msix,
884 ICE_DFLT_INTR_PER_VF,
885 ICE_MIN_INTR_PER_VF);
886 } else {
887 dev_err(&pf->pdev->dev,
888 "Number of VFs %d exceeds max VF count %d\n",
889 pf->num_alloc_vfs, ICE_MAX_VF_COUNT);
890 return -EIO;
891 }
892
893 if (!num_msix)
894 return -EIO;
895
896
897
898
899
900
901
902
903 num_txq = ice_determine_res(pf, ice_get_avail_txq_count(pf),
904 ICE_DFLT_QS_PER_VF, ICE_MIN_QS_PER_VF);
905
906 num_rxq = ice_determine_res(pf, ice_get_avail_rxq_count(pf),
907 ICE_DFLT_QS_PER_VF, ICE_MIN_QS_PER_VF);
908
909 if (!num_txq || !num_rxq)
910 return -EIO;
911
912 if (ice_sriov_set_msix_res(pf, num_msix * pf->num_alloc_vfs))
913 return -EINVAL;
914
915
916
917
918
919 pf->num_vf_qps = min_t(int, num_txq, num_rxq);
920 pf->num_vf_msix = num_msix;
921
922 return 0;
923 }
924
925
926
927
928
929
930
931
932
933
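/**
 * ice_cleanup_and_realloc_vf - finish a VF reset and reallocate its resources
 * @vf: pointer to the VF structure
 *
 * The caller is expected to have verified that the hardware reset completed
 * and that the required wait time has passed.
 */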
934 static void ice_cleanup_and_realloc_vf(struct ice_vf *vf)
935 {
936 struct ice_pf *pf = vf->pf;
937 struct ice_hw *hw;
938 u32 reg;
939
940 hw = &pf->hw;
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955 reg = rd32(hw, VPGEN_VFRTRIG(vf->vf_id));
956 reg &= ~VPGEN_VFRTRIG_VFSWR_M;
957 wr32(hw, VPGEN_VFRTRIG(vf->vf_id), reg);
958
959
960 if (!ice_alloc_vf_res(vf)) {
961 ice_ena_vf_mappings(vf);
962 set_bit(ICE_VF_STATE_ACTIVE, vf->vf_states);
963 clear_bit(ICE_VF_STATE_DIS, vf->vf_states);
964 vf->num_vlan = 0;
965 }
966
967
968
969
970
971 wr32(hw, VFGEN_RSTAT(vf->vf_id), VIRTCHNL_VFR_VFACTIVE);
972 }
973
974
975
976
977
978
979
980
981
982
983
984 static enum ice_status
985 ice_vf_set_vsi_promisc(struct ice_vf *vf, struct ice_vsi *vsi, u8 promisc_m,
986 bool rm_promisc)
987 {
988 struct ice_pf *pf = vf->pf;
989 enum ice_status status = 0;
990 struct ice_hw *hw;
991
992 hw = &pf->hw;
993 if (vf->num_vlan) {
994 status = ice_set_vlan_vsi_promisc(hw, vsi->idx, promisc_m,
995 rm_promisc);
996 } else if (vf->port_vlan_id) {
997 if (rm_promisc)
998 status = ice_clear_vsi_promisc(hw, vsi->idx, promisc_m,
999 vf->port_vlan_id);
1000 else
1001 status = ice_set_vsi_promisc(hw, vsi->idx, promisc_m,
1002 vf->port_vlan_id);
1003 } else {
1004 if (rm_promisc)
1005 status = ice_clear_vsi_promisc(hw, vsi->idx, promisc_m,
1006 0);
1007 else
1008 status = ice_set_vsi_promisc(hw, vsi->idx, promisc_m,
1009 0);
1010 }
1011
1012 return status;
1013 }
1014
1015
1016
1017
1018
1019
1020
1021
1022
1023
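/**
 * ice_config_res_vfs - finalize allocation of VF resources in one go
 * @pf: pointer to the PF structure
 *
 * Called as the last part of resetting all VFs, or when configuring VFs for
 * the first time. Returns true if resources were properly allocated for all
 * VFs, false otherwise.
 */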
1024 static bool ice_config_res_vfs(struct ice_pf *pf)
1025 {
1026 struct ice_hw *hw = &pf->hw;
1027 int v;
1028
1029 if (ice_check_avail_res(pf)) {
1030 dev_err(&pf->pdev->dev,
1031 "Cannot allocate VF resources, try with fewer number of VFs\n");
1032 return false;
1033 }
1034
1035
1036 if (test_and_clear_bit(__ICE_OICR_INTR_DIS, pf->state))
1037 ice_irq_dynamic_ena(hw, NULL, NULL);
1038
1039
1040 for (v = 0; v < pf->num_alloc_vfs; v++) {
1041 struct ice_vf *vf = &pf->vf[v];
1042
1043 vf->num_vf_qs = pf->num_vf_qps;
1044 dev_dbg(&pf->pdev->dev,
1045 "VF-id %d has %d queues configured\n",
1046 vf->vf_id, vf->num_vf_qs);
1047 ice_cleanup_and_realloc_vf(vf);
1048 }
1049
1050 ice_flush(hw);
1051 clear_bit(__ICE_VF_DIS, pf->state);
1052
1053 return true;
1054 }
1055
1056
1057
1058
1059
1060
1061
1062
1063
1064
1065
1066
1067
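/**
 * ice_reset_all_vfs - reset all allocated VFs in one go
 * @pf: pointer to the PF structure
 * @is_vflr: true if VFLR was issued, false if not
 *
 * Tell the hardware to reset each VF, do all the waiting in one chunk, and
 * then restore each VF after the wait. Returns true if the VFs were reset,
 * false otherwise (e.g. no VFs allocated or a reset is already in progress).
 */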
1068 bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr)
1069 {
1070 struct ice_hw *hw = &pf->hw;
1071 struct ice_vf *vf;
1072 int v, i;
1073
1074
1075 if (!pf->num_alloc_vfs)
1076 return false;
1077
1078
1079 if (test_and_set_bit(__ICE_VF_DIS, pf->state))
1080 return false;
1081
1082
1083 for (v = 0; v < pf->num_alloc_vfs; v++)
1084 ice_trigger_vf_reset(&pf->vf[v], is_vflr, true);
1085
1086 for (v = 0; v < pf->num_alloc_vfs; v++) {
1087 struct ice_vsi *vsi;
1088
1089 vf = &pf->vf[v];
1090 vsi = pf->vsi[vf->lan_vsi_idx];
1091 if (test_bit(ICE_VF_STATE_QS_ENA, vf->vf_states))
1092 ice_dis_vf_qs(vf);
1093 ice_dis_vsi_txq(vsi->port_info, vsi->idx, 0, 0, NULL, NULL,
1094 NULL, ICE_VF_RESET, vf->vf_id, NULL);
1095 }
1096
1097
1098
1099
1100
1101
1102
1103 for (i = 0, v = 0; i < 10 && v < pf->num_alloc_vfs; i++) {
1104
1105
1106 while (v < pf->num_alloc_vfs) {
1107 u32 reg;
1108
1109 vf = &pf->vf[v];
1110 reg = rd32(hw, VPGEN_VFRSTAT(vf->vf_id));
1111 if (!(reg & VPGEN_VFRSTAT_VFRD_M)) {
1112
1113 usleep_range(10, 20);
1114 break;
1115 }
1116
1117
1118
1119
1120 v++;
1121 }
1122 }
1123
1124
1125
1126
1127 if (v < pf->num_alloc_vfs)
1128 dev_warn(&pf->pdev->dev, "VF reset check timeout\n");
1129
1130
1131 for (v = 0; v < pf->num_alloc_vfs; v++) {
1132 vf = &pf->vf[v];
1133
1134 ice_free_vf_res(vf);
1135
1136
1137
1138
1139
1140
1141 vf->num_vf_qs = 0;
1142 }
1143
1144 if (ice_sriov_free_msix_res(pf))
1145 dev_err(&pf->pdev->dev,
1146 "Failed to free MSIX resources used by SR-IOV\n");
1147
1148 if (!ice_config_res_vfs(pf))
1149 return false;
1150
1151 return true;
1152 }
1153
1154
1155
1156
1157
1158
1159
1160
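/**
 * ice_reset_vf - reset a particular VF
 * @vf: pointer to the VF structure
 * @is_vflr: true if VFLR was issued, false if not
 *
 * Returns true if the VF was reset, false otherwise.
 */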
1161 static bool ice_reset_vf(struct ice_vf *vf, bool is_vflr)
1162 {
1163 struct ice_pf *pf = vf->pf;
1164 struct ice_vsi *vsi;
1165 struct ice_hw *hw;
1166 bool rsd = false;
1167 u8 promisc_m;
1168 u32 reg;
1169 int i;
1170
1171
1172
1173
1174 if (test_bit(__ICE_VF_DIS, pf->state))
1175 return false;
1176
1177
1178
1179
1180
1181 if (test_and_set_bit(ICE_VF_STATE_DIS, vf->vf_states))
1182 return false;
1183
1184 ice_trigger_vf_reset(vf, is_vflr, false);
1185
1186 vsi = pf->vsi[vf->lan_vsi_idx];
1187
1188 if (test_bit(ICE_VF_STATE_QS_ENA, vf->vf_states))
1189 ice_dis_vf_qs(vf);
1190
1191
1192
1193
1194 ice_dis_vsi_txq(vsi->port_info, vsi->idx, 0, 0, NULL, NULL,
1195 NULL, ICE_VF_RESET, vf->vf_id, NULL);
1196
1197 hw = &pf->hw;
1198
1199
1200
1201 for (i = 0; i < 10; i++) {
1202
1203
1204
1205
1206 reg = rd32(hw, VPGEN_VFRSTAT(vf->vf_id));
1207 if (reg & VPGEN_VFRSTAT_VFRD_M) {
1208 rsd = true;
1209 break;
1210 }
1211
1212
1213 usleep_range(10, 20);
1214 }
1215
1216
1217
1218
1219 if (!rsd)
1220 dev_warn(&pf->pdev->dev, "VF reset check timeout on VF %d\n",
1221 vf->vf_id);
1222
1223
1224
1225
1226 if (test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states) ||
1227 test_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states)) {
1228 if (vf->port_vlan_id || vf->num_vlan)
1229 promisc_m = ICE_UCAST_VLAN_PROMISC_BITS;
1230 else
1231 promisc_m = ICE_UCAST_PROMISC_BITS;
1232
1233 vsi = pf->vsi[vf->lan_vsi_idx];
1234 if (ice_vf_set_vsi_promisc(vf, vsi, promisc_m, true))
1235 dev_err(&pf->pdev->dev, "disabling promiscuous mode failed\n");
1236 }
1237
1238
1239 ice_free_vf_res(vf);
1240
1241 ice_cleanup_and_realloc_vf(vf);
1242
1243 ice_flush(hw);
1244
1245 return true;
1246 }
1247
1248
1249
1250
1251
1252 void ice_vc_notify_link_state(struct ice_pf *pf)
1253 {
1254 int i;
1255
1256 for (i = 0; i < pf->num_alloc_vfs; i++)
1257 ice_vc_notify_vf_link_state(&pf->vf[i]);
1258 }
1259
1260
1261
1262
1263
1264
1265
1266 void ice_vc_notify_reset(struct ice_pf *pf)
1267 {
1268 struct virtchnl_pf_event pfe;
1269
1270 if (!pf->num_alloc_vfs)
1271 return;
1272
1273 pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
1274 pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
1275 ice_vc_vf_broadcast(pf, VIRTCHNL_OP_EVENT, VIRTCHNL_STATUS_SUCCESS,
1276 (u8 *)&pfe, sizeof(struct virtchnl_pf_event));
1277 }
1278
1279
1280
1281
1282
1283 static void ice_vc_notify_vf_reset(struct ice_vf *vf)
1284 {
1285 struct virtchnl_pf_event pfe;
1286
1287
1288 if (!vf || vf->vf_id >= vf->pf->num_alloc_vfs)
1289 return;
1290
1291
1292
1293
1294 if ((!test_bit(ICE_VF_STATE_INIT, vf->vf_states) &&
1295 !test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) ||
1296 test_bit(ICE_VF_STATE_DIS, vf->vf_states))
1297 return;
1298
1299 pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
1300 pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
1301 ice_aq_send_msg_to_vf(&vf->pf->hw, vf->vf_id, VIRTCHNL_OP_EVENT,
1302 VIRTCHNL_STATUS_SUCCESS, (u8 *)&pfe, sizeof(pfe),
1303 NULL);
1304 }
1305
1306
1307
1308
1309
1310
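/**
 * ice_alloc_vfs - allocate and set up VF resources
 * @pf: pointer to the PF structure
 * @num_alloc_vfs: number of VFs to allocate
 *
 * Enables SR-IOV in PCI config space, allocates the VF array and configures
 * per-VF resources. Returns 0 on success, negative error code on failure.
 */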
1311 static int ice_alloc_vfs(struct ice_pf *pf, u16 num_alloc_vfs)
1312 {
1313 struct ice_hw *hw = &pf->hw;
1314 struct ice_vf *vfs;
1315 int i, ret;
1316
1317
1318 wr32(hw, GLINT_DYN_CTL(pf->oicr_idx),
1319 ICE_ITR_NONE << GLINT_DYN_CTL_ITR_INDX_S);
1320 set_bit(__ICE_OICR_INTR_DIS, pf->state);
1321 ice_flush(hw);
1322
1323 ret = pci_enable_sriov(pf->pdev, num_alloc_vfs);
1324 if (ret) {
1325 pf->num_alloc_vfs = 0;
1326 goto err_unroll_intr;
1327 }
1328
1329 vfs = devm_kcalloc(&pf->pdev->dev, num_alloc_vfs, sizeof(*vfs),
1330 GFP_KERNEL);
1331 if (!vfs) {
1332 ret = -ENOMEM;
1333 goto err_pci_disable_sriov;
1334 }
1335 pf->vf = vfs;
1336
1337
1338 for (i = 0; i < num_alloc_vfs; i++) {
1339 vfs[i].pf = pf;
1340 vfs[i].vf_sw_id = pf->first_sw;
1341 vfs[i].vf_id = i;
1342
1343
1344 set_bit(ICE_VIRTCHNL_VF_CAP_L2, &vfs[i].vf_caps);
1345 vfs[i].spoofchk = true;
1346 }
1347 pf->num_alloc_vfs = num_alloc_vfs;
1348
1349
1350 if (!ice_config_res_vfs(pf)) {
1351 ret = -EIO;
1352 goto err_unroll_sriov;
1353 }
1354
1355 return ret;
1356
1357 err_unroll_sriov:
1358 pf->vf = NULL;
1359 devm_kfree(&pf->pdev->dev, vfs);
1360 vfs = NULL;
1361 pf->num_alloc_vfs = 0;
1362 err_pci_disable_sriov:
1363 pci_disable_sriov(pf->pdev);
1364 err_unroll_intr:
1365
1366 ice_irq_dynamic_ena(hw, NULL, NULL);
1367 clear_bit(__ICE_OICR_INTR_DIS, pf->state);
1368 return ret;
1369 }
1370
1371
1372
1373
1374
1375
1376
1377
1378
1379
1380
1381
1382 static bool ice_pf_state_is_nominal(struct ice_pf *pf)
1383 {
1384 DECLARE_BITMAP(check_bits, __ICE_STATE_NBITS) = { 0 };
1385
1386 if (!pf)
1387 return false;
1388
1389 bitmap_set(check_bits, 0, __ICE_STATE_NOMINAL_CHECK_BITS);
1390 if (bitmap_intersects(pf->state, check_bits, __ICE_STATE_NBITS))
1391 return false;
1392
1393 return true;
1394 }
1395
1396
1397
1398
1399
1400
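/**
 * ice_pci_sriov_ena - enable SR-IOV or change the number of VFs
 * @pf: pointer to the PF structure
 * @num_vfs: number of VFs to allocate
 *
 * Returns the number of VFs enabled on success, negative error code otherwise.
 */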
1401 static int ice_pci_sriov_ena(struct ice_pf *pf, int num_vfs)
1402 {
1403 int pre_existing_vfs = pci_num_vf(pf->pdev);
1404 struct device *dev = &pf->pdev->dev;
1405 int err;
1406
1407 if (!ice_pf_state_is_nominal(pf)) {
1408 dev_err(dev, "Cannot enable SR-IOV, device not ready\n");
1409 return -EBUSY;
1410 }
1411
1412 if (!test_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags)) {
1413 dev_err(dev, "This device is not capable of SR-IOV\n");
1414 return -ENODEV;
1415 }
1416
1417 if (pre_existing_vfs && pre_existing_vfs != num_vfs)
1418 ice_free_vfs(pf);
1419 else if (pre_existing_vfs && pre_existing_vfs == num_vfs)
1420 return num_vfs;
1421
1422 if (num_vfs > pf->num_vfs_supported) {
1423 dev_err(dev, "Can't enable %d VFs, max VFs supported is %d\n",
1424 num_vfs, pf->num_vfs_supported);
1425 return -ENOTSUPP;
1426 }
1427
1428 dev_info(dev, "Allocating %d VFs\n", num_vfs);
1429 err = ice_alloc_vfs(pf, num_vfs);
1430 if (err) {
1431 dev_err(dev, "Failed to enable SR-IOV: %d\n", err);
1432 return err;
1433 }
1434
1435 set_bit(ICE_FLAG_SRIOV_ENA, pf->flags);
1436 return num_vfs;
1437 }
1438
1439
1440
1441
1442
1443
1444
1445
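/**
 * ice_sriov_configure - PCI sriov_configure callback
 * @pdev: pointer to the pci_dev structure
 * @num_vfs: number of VFs to allocate, or 0 to free all VFs
 *
 * Called when the user changes the number of VFs via sysfs.
 */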
1446 int ice_sriov_configure(struct pci_dev *pdev, int num_vfs)
1447 {
1448 struct ice_pf *pf = pci_get_drvdata(pdev);
1449
1450 if (ice_is_safe_mode(pf)) {
1451 dev_err(&pf->pdev->dev,
1452 "SR-IOV cannot be configured - Device is in Safe Mode\n");
1453 return -EOPNOTSUPP;
1454 }
1455
1456 if (num_vfs)
1457 return ice_pci_sriov_ena(pf, num_vfs);
1458
1459 if (!pci_vfs_assigned(pdev)) {
1460 ice_free_vfs(pf);
1461 } else {
1462 dev_err(&pf->pdev->dev,
1463 "can't free VFs because some are assigned to VMs.\n");
1464 return -EBUSY;
1465 }
1466
1467 return 0;
1468 }
1469
1470
1471
1472
1473
1474
1475
1476
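/**
 * ice_process_vflr_event - handle a pending VFLR event
 * @pf: pointer to the PF structure
 *
 * Scan GLGEN_VFLRSTAT and reset each VF whose VFLR bit is set.
 */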
1477 void ice_process_vflr_event(struct ice_pf *pf)
1478 {
1479 struct ice_hw *hw = &pf->hw;
1480 int vf_id;
1481 u32 reg;
1482
1483 if (!test_and_clear_bit(__ICE_VFLR_EVENT_PENDING, pf->state) ||
1484 !pf->num_alloc_vfs)
1485 return;
1486
1487 for (vf_id = 0; vf_id < pf->num_alloc_vfs; vf_id++) {
1488 struct ice_vf *vf = &pf->vf[vf_id];
1489 u32 reg_idx, bit_idx;
1490
1491 reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
1492 bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
1493
1494 reg = rd32(hw, GLGEN_VFLRSTAT(reg_idx));
1495 if (reg & BIT(bit_idx))
1496
1497 ice_reset_vf(vf, true);
1498 }
1499 }
1500
1501
1502
1503
1504
1505
1506
1507 static void ice_vc_dis_vf(struct ice_vf *vf)
1508 {
1509 ice_vc_notify_vf_reset(vf);
1510 ice_reset_vf(vf, false);
1511 }
1512
1513
1514
1515
1516
1517
1518
1519
1520
1521
1522
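/**
 * ice_vc_send_msg_to_vf - send a virtchnl message to a VF over the mailbox
 * @vf: pointer to the VF info
 * @v_opcode: virtual channel opcode
 * @v_retval: virtual channel return value
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 */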
1523 static int
1524 ice_vc_send_msg_to_vf(struct ice_vf *vf, u32 v_opcode,
1525 enum virtchnl_status_code v_retval, u8 *msg, u16 msglen)
1526 {
1527 enum ice_status aq_ret;
1528 struct ice_pf *pf;
1529
1530
1531 if (!vf || vf->vf_id >= vf->pf->num_alloc_vfs)
1532 return -EINVAL;
1533
1534 pf = vf->pf;
1535
1536
1537 if (v_retval) {
1538 vf->num_inval_msgs++;
1539 dev_info(&pf->pdev->dev, "VF %d failed opcode %d, retval: %d\n",
1540 vf->vf_id, v_opcode, v_retval);
1541 if (vf->num_inval_msgs > ICE_DFLT_NUM_INVAL_MSGS_ALLOWED) {
1542 dev_err(&pf->pdev->dev,
1543 "Number of invalid messages exceeded for VF %d\n",
1544 vf->vf_id);
1545 dev_err(&pf->pdev->dev, "Use PF Control I/F to enable the VF\n");
1546 set_bit(ICE_VF_STATE_DIS, vf->vf_states);
1547 return -EIO;
1548 }
1549 } else {
1550 vf->num_valid_msgs++;
1551
1552 vf->num_inval_msgs = 0;
1553 }
1554
1555 aq_ret = ice_aq_send_msg_to_vf(&pf->hw, vf->vf_id, v_opcode, v_retval,
1556 msg, msglen, NULL);
1557 if (aq_ret && pf->hw.mailboxq.sq_last_status != ICE_AQ_RC_ENOSYS) {
1558 dev_info(&pf->pdev->dev,
1559 "Unable to send the message to VF %d ret %d aq_err %d\n",
1560 vf->vf_id, aq_ret, pf->hw.mailboxq.sq_last_status);
1561 return -EIO;
1562 }
1563
1564 return 0;
1565 }
1566
1567
1568
1569
1570
1571
1572
1573
1574 static int ice_vc_get_ver_msg(struct ice_vf *vf, u8 *msg)
1575 {
1576 struct virtchnl_version_info info = {
1577 VIRTCHNL_VERSION_MAJOR, VIRTCHNL_VERSION_MINOR
1578 };
1579
1580 vf->vf_ver = *(struct virtchnl_version_info *)msg;
1581
1582 if (VF_IS_V10(&vf->vf_ver))
1583 info.minor = VIRTCHNL_VERSION_MINOR_NO_VF_CAPS;
1584
1585 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_VERSION,
1586 VIRTCHNL_STATUS_SUCCESS, (u8 *)&info,
1587 sizeof(struct virtchnl_version_info));
1588 }
1589
1590
1591
1592
1593
1594
1595
1596
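/**
 * ice_vc_get_vf_res_msg - respond to the VF with its resources
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * Called when the VF requests its resources; builds and sends the
 * virtchnl_vf_resource reply.
 */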
1597 static int ice_vc_get_vf_res_msg(struct ice_vf *vf, u8 *msg)
1598 {
1599 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
1600 struct virtchnl_vf_resource *vfres = NULL;
1601 struct ice_pf *pf = vf->pf;
1602 struct ice_vsi *vsi;
1603 int len = 0;
1604 int ret;
1605
1606 if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states)) {
1607 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1608 goto err;
1609 }
1610
1611 len = sizeof(struct virtchnl_vf_resource);
1612
1613 vfres = devm_kzalloc(&pf->pdev->dev, len, GFP_KERNEL);
1614 if (!vfres) {
1615 v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
1616 len = 0;
1617 goto err;
1618 }
1619 if (VF_IS_V11(&vf->vf_ver))
1620 vf->driver_caps = *(u32 *)msg;
1621 else
1622 vf->driver_caps = VIRTCHNL_VF_OFFLOAD_L2 |
1623 VIRTCHNL_VF_OFFLOAD_RSS_REG |
1624 VIRTCHNL_VF_OFFLOAD_VLAN;
1625
1626 vfres->vf_cap_flags = VIRTCHNL_VF_OFFLOAD_L2;
1627 vsi = pf->vsi[vf->lan_vsi_idx];
1628 if (!vsi) {
1629 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1630 goto err;
1631 }
1632
1633 if (!vsi->info.pvid)
1634 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_VLAN;
1635
1636 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
1637 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_PF;
1638 } else {
1639 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_AQ)
1640 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_AQ;
1641 else
1642 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_REG;
1643 }
1644
1645 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
1646 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2;
1647
1648 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP)
1649 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP;
1650
1651 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM)
1652 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM;
1653
1654 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RX_POLLING)
1655 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RX_POLLING;
1656
1657 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR)
1658 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_WB_ON_ITR;
1659
1660 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_REQ_QUEUES)
1661 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_REQ_QUEUES;
1662
1663 if (vf->driver_caps & VIRTCHNL_VF_CAP_ADV_LINK_SPEED)
1664 vfres->vf_cap_flags |= VIRTCHNL_VF_CAP_ADV_LINK_SPEED;
1665
1666 vfres->num_vsis = 1;
1667
1668 vfres->num_queue_pairs = vsi->num_txq;
1669 vfres->max_vectors = pf->num_vf_msix;
1670 vfres->rss_key_size = ICE_VSIQF_HKEY_ARRAY_SIZE;
1671 vfres->rss_lut_size = ICE_VSIQF_HLUT_ARRAY_SIZE;
1672
1673 vfres->vsi_res[0].vsi_id = vf->lan_vsi_num;
1674 vfres->vsi_res[0].vsi_type = VIRTCHNL_VSI_SRIOV;
1675 vfres->vsi_res[0].num_queue_pairs = vsi->num_txq;
1676 ether_addr_copy(vfres->vsi_res[0].default_mac_addr,
1677 vf->dflt_lan_addr.addr);
1678
1679 set_bit(ICE_VF_STATE_ACTIVE, vf->vf_states);
1680
1681 err:
1682
1683 ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_VF_RESOURCES, v_ret,
1684 (u8 *)vfres, len);
1685
1686 devm_kfree(&pf->pdev->dev, vfres);
1687 return ret;
1688 }
1689
1690
1691
1692
1693
1694
1695
1696
1697
1698 static void ice_vc_reset_vf_msg(struct ice_vf *vf)
1699 {
1700 if (test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
1701 ice_reset_vf(vf, false);
1702 }
1703
1704
1705
1706
1707
1708
1709
1710
1711 static struct ice_vsi *ice_find_vsi_from_id(struct ice_pf *pf, u16 id)
1712 {
1713 int i;
1714
1715 ice_for_each_vsi(pf, i)
1716 if (pf->vsi[i] && pf->vsi[i]->vsi_num == id)
1717 return pf->vsi[i];
1718
1719 return NULL;
1720 }
1721
1722
1723
1724
1725
1726
1727
1728
1729 static bool ice_vc_isvalid_vsi_id(struct ice_vf *vf, u16 vsi_id)
1730 {
1731 struct ice_pf *pf = vf->pf;
1732 struct ice_vsi *vsi;
1733
1734 vsi = ice_find_vsi_from_id(pf, vsi_id);
1735
1736 return (vsi && (vsi->vf_id == vf->vf_id));
1737 }
1738
1739
1740
1741
1742
1743
1744
1745
1746
1747 static bool ice_vc_isvalid_q_id(struct ice_vf *vf, u16 vsi_id, u8 qid)
1748 {
1749 struct ice_vsi *vsi = ice_find_vsi_from_id(vf->pf, vsi_id);
1750
1751 return (vsi && (qid < vsi->alloc_txq));
1752 }
1753
1754
1755
1756
1757
1758
1759
1760
1761 static bool ice_vc_isvalid_ring_len(u16 ring_len)
1762 {
1763 return ring_len == 0 ||
1764 (ring_len >= ICE_MIN_NUM_DESC &&
1765 ring_len <= ICE_MAX_NUM_DESC &&
1766 !(ring_len % ICE_REQ_DESC_MULTIPLE));
1767 }
1768
1769
1770
1771
1772
1773
1774
1775
1776 static int ice_vc_config_rss_key(struct ice_vf *vf, u8 *msg)
1777 {
1778 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
1779 struct virtchnl_rss_key *vrk =
1780 (struct virtchnl_rss_key *)msg;
1781 struct ice_pf *pf = vf->pf;
1782 struct ice_vsi *vsi = NULL;
1783
1784 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
1785 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1786 goto error_param;
1787 }
1788
1789 if (!ice_vc_isvalid_vsi_id(vf, vrk->vsi_id)) {
1790 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1791 goto error_param;
1792 }
1793
1794 if (vrk->key_len != ICE_VSIQF_HKEY_ARRAY_SIZE) {
1795 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1796 goto error_param;
1797 }
1798
1799 if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) {
1800 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1801 goto error_param;
1802 }
1803
1804 vsi = pf->vsi[vf->lan_vsi_idx];
1805 if (!vsi) {
1806 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1807 goto error_param;
1808 }
1809
1810 if (ice_set_rss(vsi, vrk->key, NULL, 0))
1811 v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
1812 error_param:
1813 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_KEY, v_ret,
1814 NULL, 0);
1815 }
1816
1817
1818
1819
1820
1821
1822
1823
1824 static int ice_vc_config_rss_lut(struct ice_vf *vf, u8 *msg)
1825 {
1826 struct virtchnl_rss_lut *vrl = (struct virtchnl_rss_lut *)msg;
1827 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
1828 struct ice_pf *pf = vf->pf;
1829 struct ice_vsi *vsi = NULL;
1830
1831 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
1832 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1833 goto error_param;
1834 }
1835
1836 if (!ice_vc_isvalid_vsi_id(vf, vrl->vsi_id)) {
1837 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1838 goto error_param;
1839 }
1840
1841 if (vrl->lut_entries != ICE_VSIQF_HLUT_ARRAY_SIZE) {
1842 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1843 goto error_param;
1844 }
1845
1846 if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) {
1847 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1848 goto error_param;
1849 }
1850
1851 vsi = pf->vsi[vf->lan_vsi_idx];
1852 if (!vsi) {
1853 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1854 goto error_param;
1855 }
1856
1857 if (ice_set_rss(vsi, NULL, vrl->lut, ICE_VSIQF_HLUT_ARRAY_SIZE))
1858 v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
1859 error_param:
1860 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_LUT, v_ret,
1861 NULL, 0);
1862 }
1863
1864
1865
1866
1867
1868
1869
1870
1871 static int ice_vc_get_stats_msg(struct ice_vf *vf, u8 *msg)
1872 {
1873 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
1874 struct virtchnl_queue_select *vqs =
1875 (struct virtchnl_queue_select *)msg;
1876 struct ice_eth_stats stats = { 0 };
1877 struct ice_pf *pf = vf->pf;
1878 struct ice_vsi *vsi;
1879
1880 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
1881 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1882 goto error_param;
1883 }
1884
1885 if (!ice_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
1886 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1887 goto error_param;
1888 }
1889
1890 vsi = pf->vsi[vf->lan_vsi_idx];
1891 if (!vsi) {
1892 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1893 goto error_param;
1894 }
1895
1896 ice_update_eth_stats(vsi);
1897
1898 stats = vsi->eth_stats;
1899
1900 error_param:
1901
1902 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_STATS, v_ret,
1903 (u8 *)&stats, sizeof(stats));
1904 }
1905
1906
1907
1908
1909
1910
1911
1912
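/**
 * ice_vc_ena_qs_msg - enable selected VF queues
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * Called from the VF to enable all or specific queue(s).
 */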
1913 static int ice_vc_ena_qs_msg(struct ice_vf *vf, u8 *msg)
1914 {
1915 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
1916 struct virtchnl_queue_select *vqs =
1917 (struct virtchnl_queue_select *)msg;
1918 struct ice_pf *pf = vf->pf;
1919 struct ice_vsi *vsi;
1920 unsigned long q_map;
1921 u16 vf_q_id;
1922
1923 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
1924 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1925 goto error_param;
1926 }
1927
1928 if (!ice_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
1929 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1930 goto error_param;
1931 }
1932
1933 if (!vqs->rx_queues && !vqs->tx_queues) {
1934 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1935 goto error_param;
1936 }
1937
1938 if (vqs->rx_queues > ICE_MAX_BASE_QS_PER_VF ||
1939 vqs->tx_queues > ICE_MAX_BASE_QS_PER_VF) {
1940 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1941 goto error_param;
1942 }
1943
1944 vsi = pf->vsi[vf->lan_vsi_idx];
1945 if (!vsi) {
1946 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1947 goto error_param;
1948 }
1949
1950
1951
1952
1953
1954 q_map = vqs->rx_queues;
1955 for_each_set_bit(vf_q_id, &q_map, ICE_MAX_BASE_QS_PER_VF) {
1956 if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
1957 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1958 goto error_param;
1959 }
1960
1961
1962 if (test_bit(vf_q_id, vf->rxq_ena))
1963 continue;
1964
1965 if (ice_vsi_ctrl_rx_ring(vsi, true, vf_q_id)) {
1966 dev_err(&vsi->back->pdev->dev,
1967 "Failed to enable Rx ring %d on VSI %d\n",
1968 vf_q_id, vsi->vsi_num);
1969 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1970 goto error_param;
1971 }
1972
1973 set_bit(vf_q_id, vf->rxq_ena);
1974 vf->num_qs_ena++;
1975 }
1976
1977 vsi = pf->vsi[vf->lan_vsi_idx];
1978 q_map = vqs->tx_queues;
1979 for_each_set_bit(vf_q_id, &q_map, ICE_MAX_BASE_QS_PER_VF) {
1980 if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
1981 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1982 goto error_param;
1983 }
1984
1985
1986 if (test_bit(vf_q_id, vf->txq_ena))
1987 continue;
1988
1989 set_bit(vf_q_id, vf->txq_ena);
1990 vf->num_qs_ena++;
1991 }
1992
1993
1994 if (v_ret == VIRTCHNL_STATUS_SUCCESS)
1995 set_bit(ICE_VF_STATE_QS_ENA, vf->vf_states);
1996
1997 error_param:
1998
1999 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_QUEUES, v_ret,
2000 NULL, 0);
2001 }
2002
2003
2004
2005
2006
2007
2008
2009
2010
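/**
 * ice_vc_dis_qs_msg - disable selected VF queues
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * Called from the VF to disable all or specific queue(s).
 */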
2011 static int ice_vc_dis_qs_msg(struct ice_vf *vf, u8 *msg)
2012 {
2013 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
2014 struct virtchnl_queue_select *vqs =
2015 (struct virtchnl_queue_select *)msg;
2016 struct ice_pf *pf = vf->pf;
2017 struct ice_vsi *vsi;
2018 unsigned long q_map;
2019 u16 vf_q_id;
2020
2021 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) &&
2022 !test_bit(ICE_VF_STATE_QS_ENA, vf->vf_states)) {
2023 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2024 goto error_param;
2025 }
2026
2027 if (!ice_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
2028 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2029 goto error_param;
2030 }
2031
2032 if (!vqs->rx_queues && !vqs->tx_queues) {
2033 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2034 goto error_param;
2035 }
2036
2037 if (vqs->rx_queues > ICE_MAX_BASE_QS_PER_VF ||
2038 vqs->tx_queues > ICE_MAX_BASE_QS_PER_VF) {
2039 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2040 goto error_param;
2041 }
2042
2043 vsi = pf->vsi[vf->lan_vsi_idx];
2044 if (!vsi) {
2045 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2046 goto error_param;
2047 }
2048
2049 if (vqs->tx_queues) {
2050 q_map = vqs->tx_queues;
2051
2052 for_each_set_bit(vf_q_id, &q_map, ICE_MAX_BASE_QS_PER_VF) {
2053 struct ice_ring *ring = vsi->tx_rings[vf_q_id];
2054 struct ice_txq_meta txq_meta = { 0 };
2055
2056 if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
2057 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2058 goto error_param;
2059 }
2060
2061
2062 if (!test_bit(vf_q_id, vf->txq_ena))
2063 continue;
2064
2065 ice_fill_txq_meta(vsi, ring, &txq_meta);
2066
2067 if (ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, vf->vf_id,
2068 ring, &txq_meta)) {
2069 dev_err(&vsi->back->pdev->dev,
2070 "Failed to stop Tx ring %d on VSI %d\n",
2071 vf_q_id, vsi->vsi_num);
2072 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2073 goto error_param;
2074 }
2075
2076
2077 clear_bit(vf_q_id, vf->txq_ena);
2078 vf->num_qs_ena--;
2079 }
2080 }
2081
2082 if (vqs->rx_queues) {
2083 q_map = vqs->rx_queues;
2084
2085 for_each_set_bit(vf_q_id, &q_map, ICE_MAX_BASE_QS_PER_VF) {
2086 if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
2087 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2088 goto error_param;
2089 }
2090
2091
2092 if (!test_bit(vf_q_id, vf->rxq_ena))
2093 continue;
2094
2095 if (ice_vsi_ctrl_rx_ring(vsi, false, vf_q_id)) {
2096 dev_err(&vsi->back->pdev->dev,
2097 "Failed to stop Rx ring %d on VSI %d\n",
2098 vf_q_id, vsi->vsi_num);
2099 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2100 goto error_param;
2101 }
2102
2103
2104 clear_bit(vf_q_id, vf->rxq_ena);
2105 vf->num_qs_ena--;
2106 }
2107 }
2108
2109
2110 if (v_ret == VIRTCHNL_STATUS_SUCCESS && !vf->num_qs_ena)
2111 clear_bit(ICE_VF_STATE_QS_ENA, vf->vf_states);
2112
2113 error_param:
2114
2115 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_QUEUES, v_ret,
2116 NULL, 0);
2117 }
2118
2119
2120
2121
2122
2123
2124
2125
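/**
 * ice_vc_cfg_irq_map_msg - configure the VF's IRQ-to-queue mapping
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * Called from the VF to configure the mapping of MSI-X vectors to queues.
 */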
2126 static int ice_vc_cfg_irq_map_msg(struct ice_vf *vf, u8 *msg)
2127 {
2128 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
2129 struct virtchnl_irq_map_info *irqmap_info;
2130 u16 vsi_id, vsi_q_id, vector_id;
2131 struct virtchnl_vector_map *map;
2132 struct ice_pf *pf = vf->pf;
2133 u16 num_q_vectors_mapped;
2134 struct ice_vsi *vsi;
2135 unsigned long qmap;
2136 int i;
2137
2138 irqmap_info = (struct virtchnl_irq_map_info *)msg;
2139 num_q_vectors_mapped = irqmap_info->num_vectors;
2140
2141
2142
2143
2144
2145 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) ||
2146 pf->num_vf_msix < num_q_vectors_mapped ||
2147 !irqmap_info->num_vectors) {
2148 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2149 goto error_param;
2150 }
2151
2152 vsi = pf->vsi[vf->lan_vsi_idx];
2153 if (!vsi) {
2154 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2155 goto error_param;
2156 }
2157
2158 for (i = 0; i < num_q_vectors_mapped; i++) {
2159 struct ice_q_vector *q_vector;
2160
2161 map = &irqmap_info->vecmap[i];
2162
2163 vector_id = map->vector_id;
2164 vsi_id = map->vsi_id;
2165
2166 if (!(vector_id < pf->hw.func_caps.common_cap
2167 .num_msix_vectors) || !ice_vc_isvalid_vsi_id(vf, vsi_id) ||
2168 (!vector_id && (map->rxq_map || map->txq_map))) {
2169 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2170 goto error_param;
2171 }
2172
2173
2174 if (!vector_id)
2175 continue;
2176
2177
2178
2179
2180 q_vector = vsi->q_vectors[vector_id - ICE_NONQ_VECS_VF];
2181 if (!q_vector) {
2182 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2183 goto error_param;
2184 }
2185
2186
2187 qmap = map->rxq_map;
2188 q_vector->num_ring_rx = 0;
2189 for_each_set_bit(vsi_q_id, &qmap, ICE_MAX_BASE_QS_PER_VF) {
2190 if (!ice_vc_isvalid_q_id(vf, vsi_id, vsi_q_id)) {
2191 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2192 goto error_param;
2193 }
2194 q_vector->num_ring_rx++;
2195 q_vector->rx.itr_idx = map->rxitr_idx;
2196 vsi->rx_rings[vsi_q_id]->q_vector = q_vector;
2197 ice_cfg_rxq_interrupt(vsi, vsi_q_id, vector_id,
2198 q_vector->rx.itr_idx);
2199 }
2200
2201 qmap = map->txq_map;
2202 q_vector->num_ring_tx = 0;
2203 for_each_set_bit(vsi_q_id, &qmap, ICE_MAX_BASE_QS_PER_VF) {
2204 if (!ice_vc_isvalid_q_id(vf, vsi_id, vsi_q_id)) {
2205 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2206 goto error_param;
2207 }
2208 q_vector->num_ring_tx++;
2209 q_vector->tx.itr_idx = map->txitr_idx;
2210 vsi->tx_rings[vsi_q_id]->q_vector = q_vector;
2211 ice_cfg_txq_interrupt(vsi, vsi_q_id, vector_id,
2212 q_vector->tx.itr_idx);
2213 }
2214 }
2215
2216 error_param:
2217
2218 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_IRQ_MAP, v_ret,
2219 NULL, 0);
2220 }
2221
2222
2223
2224
2225
2226
2227
2228
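/**
 * ice_vc_cfg_qs_msg - configure the VF's Rx/Tx queues
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * Called from the VF to configure ring addresses, lengths and buffer sizes.
 */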
2229 static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
2230 {
2231 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
2232 struct virtchnl_vsi_queue_config_info *qci =
2233 (struct virtchnl_vsi_queue_config_info *)msg;
2234 struct virtchnl_queue_pair_info *qpi;
2235 u16 num_rxq = 0, num_txq = 0;
2236 struct ice_pf *pf = vf->pf;
2237 struct ice_vsi *vsi;
2238 int i;
2239
2240 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
2241 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2242 goto error_param;
2243 }
2244
2245 if (!ice_vc_isvalid_vsi_id(vf, qci->vsi_id)) {
2246 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2247 goto error_param;
2248 }
2249
2250 vsi = pf->vsi[vf->lan_vsi_idx];
2251 if (!vsi) {
2252 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2253 goto error_param;
2254 }
2255
2256 if (qci->num_queue_pairs > ICE_MAX_BASE_QS_PER_VF ||
2257 qci->num_queue_pairs > min_t(u16, vsi->alloc_txq, vsi->alloc_rxq)) {
2258 dev_err(&pf->pdev->dev,
2259 "VF-%d requesting more than supported number of queues: %d\n",
2260 vf->vf_id, min_t(u16, vsi->alloc_txq, vsi->alloc_rxq));
2261 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2262 goto error_param;
2263 }
2264
2265 for (i = 0; i < qci->num_queue_pairs; i++) {
2266 qpi = &qci->qpair[i];
2267 if (qpi->txq.vsi_id != qci->vsi_id ||
2268 qpi->rxq.vsi_id != qci->vsi_id ||
2269 qpi->rxq.queue_id != qpi->txq.queue_id ||
2270 qpi->txq.headwb_enabled ||
2271 !ice_vc_isvalid_ring_len(qpi->txq.ring_len) ||
2272 !ice_vc_isvalid_ring_len(qpi->rxq.ring_len) ||
2273 !ice_vc_isvalid_q_id(vf, qci->vsi_id, qpi->txq.queue_id)) {
2274 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2275 goto error_param;
2276 }
2277
2278 if (qpi->txq.ring_len > 0) {
2279 num_txq++;
2280 vsi->tx_rings[i]->dma = qpi->txq.dma_ring_addr;
2281 vsi->tx_rings[i]->count = qpi->txq.ring_len;
2282 }
2283
2284
2285 if (qpi->rxq.ring_len > 0) {
2286 num_rxq++;
2287 vsi->rx_rings[i]->dma = qpi->rxq.dma_ring_addr;
2288 vsi->rx_rings[i]->count = qpi->rxq.ring_len;
2289
2290 if (qpi->rxq.databuffer_size != 0 &&
2291 (qpi->rxq.databuffer_size > ((16 * 1024) - 128) ||
2292 qpi->rxq.databuffer_size < 1024)) {
2293 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2294 goto error_param;
2295 }
2296 vsi->rx_buf_len = qpi->rxq.databuffer_size;
2297 vsi->rx_rings[i]->rx_buf_len = vsi->rx_buf_len;
2298 if (qpi->rxq.max_pkt_size >= (16 * 1024) ||
2299 qpi->rxq.max_pkt_size < 64) {
2300 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2301 goto error_param;
2302 }
2303 }
2304
2305 vsi->max_frame = qpi->rxq.max_pkt_size;
2306 }
2307
2308
2309
2310
2311 vsi->num_txq = num_txq;
2312 vsi->num_rxq = num_rxq;
2313
2314 vsi->tc_cfg.tc_info[0].qcount_tx = num_txq;
2315 vsi->tc_cfg.tc_info[0].qcount_rx = num_rxq;
2316
2317 if (ice_vsi_cfg_lan_txqs(vsi) || ice_vsi_cfg_rxqs(vsi))
2318 v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
2319
2320 error_param:
2321
2322 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES, v_ret,
2323 NULL, 0);
2324 }
2325
2326
2327
2328
2329
2330 static bool ice_is_vf_trusted(struct ice_vf *vf)
2331 {
2332 return test_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
2333 }
2334
2335
2336
2337
2338
2339
2340
2341 static bool ice_can_vf_change_mac(struct ice_vf *vf)
2342 {
2343
2344
2345
2346
2347 if (vf->pf_set_mac && !ice_is_vf_trusted(vf))
2348 return false;
2349
2350 return true;
2351 }
2352
2353
2354
2355
2356
2357
2358
2359
2360
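/**
 * ice_vc_handle_mac_addr_msg - add or remove MAC address filters for the VF
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 * @set: true if MAC filters are being added, false if being removed
 */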
2361 static int
2362 ice_vc_handle_mac_addr_msg(struct ice_vf *vf, u8 *msg, bool set)
2363 {
2364 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
2365 struct virtchnl_ether_addr_list *al =
2366 (struct virtchnl_ether_addr_list *)msg;
2367 struct ice_pf *pf = vf->pf;
2368 enum virtchnl_ops vc_op;
2369 enum ice_status status;
2370 struct ice_vsi *vsi;
2371 int mac_count = 0;
2372 int i;
2373
2374 if (set)
2375 vc_op = VIRTCHNL_OP_ADD_ETH_ADDR;
2376 else
2377 vc_op = VIRTCHNL_OP_DEL_ETH_ADDR;
2378
2379 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) ||
2380 !ice_vc_isvalid_vsi_id(vf, al->vsi_id)) {
2381 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2382 goto handle_mac_exit;
2383 }
2384
2385 if (set && !ice_is_vf_trusted(vf) &&
2386 (vf->num_mac + al->num_elements) > ICE_MAX_MACADDR_PER_VF) {
2387 dev_err(&pf->pdev->dev,
2388 "Can't add more MAC addresses, because VF-%d is not trusted, switch the VF to trusted mode in order to add more functionalities\n",
2389 vf->vf_id);
2390
2391
2392
2393 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2394 goto handle_mac_exit;
2395 }
2396
2397 vsi = pf->vsi[vf->lan_vsi_idx];
2398 if (!vsi) {
2399 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2400 goto handle_mac_exit;
2401 }
2402
2403 for (i = 0; i < al->num_elements; i++) {
2404 u8 *maddr = al->list[i].addr;
2405
2406 if (ether_addr_equal(maddr, vf->dflt_lan_addr.addr) ||
2407 is_broadcast_ether_addr(maddr)) {
2408 if (set) {
2409
2410
2411
2412 dev_info(&pf->pdev->dev,
2413 "MAC %pM already set for VF %d\n",
2414 maddr, vf->vf_id);
2415 continue;
2416 } else {
2417
2418 dev_err(&pf->pdev->dev,
2419 "VF can't remove default MAC address or MAC %pM programmed by PF for VF %d\n",
2420 maddr, vf->vf_id);
2421 continue;
2422 }
2423 }
2424
2425
2426 if (is_zero_ether_addr(maddr)) {
2427 dev_err(&pf->pdev->dev,
2428 "invalid MAC %pM provided for VF %d\n",
2429 maddr, vf->vf_id);
2430 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2431 goto handle_mac_exit;
2432 }
2433
2434 if (is_unicast_ether_addr(maddr) &&
2435 !ice_can_vf_change_mac(vf)) {
2436 dev_err(&pf->pdev->dev,
2437 "can't change unicast MAC for untrusted VF %d\n",
2438 vf->vf_id);
2439 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2440 goto handle_mac_exit;
2441 }
2442
2443
2444 status = ice_vsi_cfg_mac_fltr(vsi, maddr, set);
2445 if (status == ICE_ERR_DOES_NOT_EXIST ||
2446 status == ICE_ERR_ALREADY_EXISTS) {
2447 dev_info(&pf->pdev->dev,
2448 "can't %s MAC filters %pM for VF %d, error %d\n",
2449 set ? "add" : "remove", maddr, vf->vf_id,
2450 status);
2451 } else if (status) {
2452 dev_err(&pf->pdev->dev,
2453 "can't %s MAC filters for VF %d, error %d\n",
2454 set ? "add" : "remove", vf->vf_id, status);
2455 v_ret = ice_err_to_virt_err(status);
2456 goto handle_mac_exit;
2457 }
2458
2459 mac_count++;
2460 }
2461
2462
2463 if (set)
2464 vf->num_mac += mac_count;
2465 else
2466 vf->num_mac -= mac_count;
2467
2468 handle_mac_exit:
2469
2470 return ice_vc_send_msg_to_vf(vf, vc_op, v_ret, NULL, 0);
2471 }
2472
2473
2474
2475
2476
2477
2478
2479
2480 static int ice_vc_add_mac_addr_msg(struct ice_vf *vf, u8 *msg)
2481 {
2482 return ice_vc_handle_mac_addr_msg(vf, msg, true);
2483 }
2484
2485
2486
2487
2488
2489
2490
2491
2492 static int ice_vc_del_mac_addr_msg(struct ice_vf *vf, u8 *msg)
2493 {
2494 return ice_vc_handle_mac_addr_msg(vf, msg, false);
2495 }
2496
2497
2498
2499
2500
2501
2502
2503
2504
2505
2506
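/**
 * ice_vc_request_qs_msg - handle a queue count change request from the VF
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * VFs get a default number of queues but can use this message to request a
 * different number. If the request can be honored, the PF resets the VF;
 * otherwise it replies with the number of queue pairs actually available.
 */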
2507 static int ice_vc_request_qs_msg(struct ice_vf *vf, u8 *msg)
2508 {
2509 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
2510 struct virtchnl_vf_res_request *vfres =
2511 (struct virtchnl_vf_res_request *)msg;
2512 u16 req_queues = vfres->num_queue_pairs;
2513 struct ice_pf *pf = vf->pf;
2514 u16 max_allowed_vf_queues;
2515 u16 tx_rx_queue_left;
2516 u16 cur_queues;
2517
2518 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
2519 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2520 goto error_param;
2521 }
2522
2523 cur_queues = vf->num_vf_qs;
2524 tx_rx_queue_left = min_t(u16, ice_get_avail_txq_count(pf),
2525 ice_get_avail_rxq_count(pf));
2526 max_allowed_vf_queues = tx_rx_queue_left + cur_queues;
2527 if (!req_queues) {
2528 dev_err(&pf->pdev->dev,
2529 "VF %d tried to request 0 queues. Ignoring.\n",
2530 vf->vf_id);
2531 } else if (req_queues > ICE_MAX_BASE_QS_PER_VF) {
2532 dev_err(&pf->pdev->dev,
2533 "VF %d tried to request more than %d queues.\n",
2534 vf->vf_id, ICE_MAX_BASE_QS_PER_VF);
2535 vfres->num_queue_pairs = ICE_MAX_BASE_QS_PER_VF;
2536 } else if (req_queues > cur_queues &&
2537 req_queues - cur_queues > tx_rx_queue_left) {
2538 dev_warn(&pf->pdev->dev,
2539 "VF %d requested %u more queues, but only %u left.\n",
2540 vf->vf_id, req_queues - cur_queues, tx_rx_queue_left);
2541 vfres->num_queue_pairs = min_t(u16, max_allowed_vf_queues,
2542 ICE_MAX_BASE_QS_PER_VF);
2543 } else {
2544
2545 vf->num_req_qs = req_queues;
2546 ice_vc_dis_vf(vf);
2547 dev_info(&pf->pdev->dev,
2548 "VF %d granted request of %u queues.\n",
2549 vf->vf_id, req_queues);
2550 return 0;
2551 }
2552
2553 error_param:
2554
2555 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_REQUEST_QUEUES,
2556 v_ret, (u8 *)vfres, sizeof(*vfres));
2557 }
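The branch structure above reduces to a small piece of arithmetic on the value echoed back to the VF. The sketch below restates it in standalone C; MAX_QS_PER_VF is a hypothetical stand-in for ICE_MAX_BASE_QS_PER_VF, and the return value is what would go back in num_queue_pairs (or the granted count when the VF gets reset).

#include <stdint.h>

#define MAX_QS_PER_VF	16	/* hypothetical stand-in for ICE_MAX_BASE_QS_PER_VF */

static uint16_t min_u16(uint16_t a, uint16_t b)
{
	return a < b ? a : b;
}

/* req: queue pairs requested, cur: currently assigned, left: free Tx/Rx pairs in the PF */
static uint16_t queue_request_outcome(uint16_t req, uint16_t cur, uint16_t left)
{
	uint16_t max_allowed = left + cur;	/* the most this VF could grow to right now */

	if (!req)
		return req;			/* zero request is ignored and echoed back */
	if (req > MAX_QS_PER_VF)
		return MAX_QS_PER_VF;		/* advertise the per-VF cap instead */
	if (req > cur && req - cur > left)
		return min_u16(max_allowed, MAX_QS_PER_VF);	/* not enough free queues */
	return req;				/* grantable: the VF is reset with req pairs */
}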
2558
2559 /**
2560 * ice_set_vf_port_vlan
2561 * @netdev: network interface device structure
2562 * @vf_id: VF identifier
2563 * @vlan_id: VLAN ID being set
2564 * @qos: priority setting
2565 * @vlan_proto: VLAN protocol
2566 *
2567 * program VF port VLAN ID and/or QoS
2568 */
2569 int
2570 ice_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos,
2571 __be16 vlan_proto)
2572 {
2573 u16 vlanprio = vlan_id | (qos << ICE_VLAN_PRIORITY_S);
2574 struct ice_netdev_priv *np = netdev_priv(netdev);
2575 struct ice_pf *pf = np->vsi->back;
2576 struct ice_vsi *vsi;
2577 struct ice_vf *vf;
2578 int ret = 0;
2579
2580
2581 if (vf_id >= pf->num_alloc_vfs) {
2582 dev_err(&pf->pdev->dev, "invalid VF id: %d\n", vf_id);
2583 return -EINVAL;
2584 }
2585
2586 if (vlan_id > ICE_MAX_VLANID || qos > 7) {
2587 dev_err(&pf->pdev->dev, "Invalid VF Parameters\n");
2588 return -EINVAL;
2589 }
2590
2591 if (vlan_proto != htons(ETH_P_8021Q)) {
2592 dev_err(&pf->pdev->dev, "VF VLAN protocol is not supported\n");
2593 return -EPROTONOSUPPORT;
2594 }
2595
2596 vf = &pf->vf[vf_id];
2597 vsi = pf->vsi[vf->lan_vsi_idx];
2598 if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states)) {
2599 dev_err(&pf->pdev->dev, "VF %d in reset. Try again.\n", vf_id);
2600 return -EBUSY;
2601 }
2602
2603 if (le16_to_cpu(vsi->info.pvid) == vlanprio) {
2604
2605 dev_info(&pf->pdev->dev,
2606 "Duplicate pvid %d request\n", vlanprio);
2607 return ret;
2608 }
2609
2610
2611 if (vsi->info.pvid)
2612 ice_vsi_kill_vlan(vsi, (le16_to_cpu(vsi->info.pvid) &
2613 VLAN_VID_MASK));
2614
2615 if (vlan_id || qos) {
2616 ret = ice_vsi_manage_pvid(vsi, vlanprio, true);
2617 if (ret)
2618 goto error_set_pvid;
2619 } else {
2620 ice_vsi_manage_pvid(vsi, 0, false);
2621 vsi->info.pvid = 0;
2622 }
2623
2624 if (vlan_id) {
2625 dev_info(&pf->pdev->dev, "Setting VLAN %d, QOS 0x%x on VF %d\n",
2626 vlan_id, qos, vf_id);
2627
2628 /* add a VLAN filter for the new port VLAN */
2629 ret = ice_vsi_add_vlan(vsi, vlan_id);
2630 if (ret)
2631 goto error_set_pvid;
2632 }
2633
2634 /* The port VLAN needs to be saved across resets the same as the
2635 * default LAN MAC address
2636 */
2637 vf->port_vlan_id = le16_to_cpu(vsi->info.pvid);
2638
2639 error_set_pvid:
2640 return ret;
2641 }
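The vlanprio value built at the top of the function packs the VLAN ID and QoS priority into one 16-bit word, and ice_get_vf_cfg() later unpacks the same word from vsi->info.pvid. A rough sketch of that packing follows; the shift and mask values are assumptions standing in for ICE_VLAN_PRIORITY_S, ICE_VLAN_M and ICE_PRIORITY_M, which the driver headers define.

#include <stdint.h>

#define SKETCH_VLAN_M	0x0FFF	/* assumed: low 12 bits carry the VLAN ID */
#define SKETCH_PRIO_S	13	/* assumed stand-in for ICE_VLAN_PRIORITY_S */

static uint16_t pack_port_vlan(uint16_t vlan_id, uint8_t qos)
{
	return (uint16_t)(vlan_id | ((uint16_t)qos << SKETCH_PRIO_S));
}

static void unpack_port_vlan(uint16_t vlanprio, uint16_t *vlan_id, uint8_t *qos)
{
	*vlan_id = vlanprio & SKETCH_VLAN_M;
	*qos = (uint8_t)(vlanprio >> SKETCH_PRIO_S);
}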
2642
2643 /**
2644 * ice_vc_process_vlan_msg
2645 * @vf: pointer to the VF info
2646 * @msg: pointer to the msg buffer
2647 * @add_v: Add VLAN if true, otherwise delete VLAN
2648 *
2649 * Process virtchnl op to add or remove programmed guest VLAN ID
2650 */
2651 static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v)
2652 {
2653 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
2654 struct virtchnl_vlan_filter_list *vfl =
2655 (struct virtchnl_vlan_filter_list *)msg;
2656 struct ice_pf *pf = vf->pf;
2657 bool vlan_promisc = false;
2658 struct ice_vsi *vsi;
2659 struct ice_hw *hw;
2660 int status = 0;
2661 u8 promisc_m;
2662 int i;
2663
2664 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
2665 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2666 goto error_param;
2667 }
2668
2669 if (!ice_vc_isvalid_vsi_id(vf, vfl->vsi_id)) {
2670 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2671 goto error_param;
2672 }
2673
2674 if (add_v && !ice_is_vf_trusted(vf) &&
2675 vf->num_vlan >= ICE_MAX_VLAN_PER_VF) {
2676 dev_info(&pf->pdev->dev,
2677 "VF-%d is not trusted, switch the VF to trusted mode in order to add more VLAN IDs\n",
2678 vf->vf_id);
2679 /* There is no need to let VF know about being not trusted,
2680 * so we can just return success message here
2681 */
2682 goto error_param;
2683 }
2684
2685 for (i = 0; i < vfl->num_elements; i++) {
2686 if (vfl->vlan_id[i] > ICE_MAX_VLANID) {
2687 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2688 dev_err(&pf->pdev->dev,
2689 "invalid VF VLAN id %d\n", vfl->vlan_id[i]);
2690 goto error_param;
2691 }
2692 }
2693
2694 hw = &pf->hw;
2695 vsi = pf->vsi[vf->lan_vsi_idx];
2696 if (!vsi) {
2697 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2698 goto error_param;
2699 }
2700
2701 if (vsi->info.pvid) {
2702 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2703 goto error_param;
2704 }
2705
2706 if (ice_vsi_manage_vlan_stripping(vsi, add_v)) {
2707 dev_err(&pf->pdev->dev,
2708 "%sable VLAN stripping failed for VSI %i\n",
2709 add_v ? "en" : "dis", vsi->vsi_num);
2710 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2711 goto error_param;
2712 }
2713
2714 if (test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states) ||
2715 test_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states))
2716 vlan_promisc = true;
2717
2718 if (add_v) {
2719 for (i = 0; i < vfl->num_elements; i++) {
2720 u16 vid = vfl->vlan_id[i];
2721
2722 if (!ice_is_vf_trusted(vf) &&
2723 vf->num_vlan >= ICE_MAX_VLAN_PER_VF) {
2724 dev_info(&pf->pdev->dev,
2725 "VF-%d is not trusted, switch the VF to trusted mode in order to add more VLAN IDs\n",
2726 vf->vf_id);
2727 /* There is no need to let VF know about being
2728 * not trusted, so we can just return success
2729 * message here as well
2730 */
2731 goto error_param;
2732 }
2733
2734 if (ice_vsi_add_vlan(vsi, vid)) {
2735 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2736 goto error_param;
2737 }
2738
2739 vf->num_vlan++;
2740
2741 if (!vlan_promisc) {
2742 status = ice_cfg_vlan_pruning(vsi, true, false);
2743 if (status) {
2744 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2745 dev_err(&pf->pdev->dev,
2746 "Enable VLAN pruning on VLAN ID %d failed, error %d\n",
2747 vid, status);
2748 goto error_param;
2749 }
2750 } else {
2751 /* Enable Ucast/Mcast VLAN promiscuous mode */
2752 promisc_m = ICE_PROMISC_VLAN_TX |
2753 ICE_PROMISC_VLAN_RX;
2754
2755 status = ice_set_vsi_promisc(hw, vsi->idx,
2756 promisc_m, vid);
2757 if (status) {
2758 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2759 dev_err(&pf->pdev->dev,
2760 "Enable unicast/multicast promiscuous mode on VLAN ID %d failed, error %d\n",
2761 vid, status);
2762 }
2763 }
2764 }
2765 } else {
2766 /* For an untrusted VF, the number of VLAN elements passed
2767 * to the PF for removal can exceed the number of VLAN
2768 * filters actually programmed for that VF. Use the count of
2769 * VLANs added earlier via the add VLAN opcode so we don't
2770 * try to remove a VLAN that doesn't exist, which would send
2771 * an erroneous failure message back to the VF
2772 */
2773 int num_vf_vlan;
2774
2775 num_vf_vlan = vf->num_vlan;
2776 for (i = 0; i < vfl->num_elements && i < num_vf_vlan; i++) {
2777 u16 vid = vfl->vlan_id[i];
2778
2779 /* Make sure ice_vsi_kill_vlan is successful before
2780 * updating VLAN information
2781 */
2782 if (ice_vsi_kill_vlan(vsi, vid)) {
2783 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2784 goto error_param;
2785 }
2786
2787 vf->num_vlan--;
2788
2789 if (!vf->num_vlan)
2790 ice_cfg_vlan_pruning(vsi, false, false);
2791
2792 /* Disable Ucast/Mcast VLAN promiscuous mode */
2793 if (vlan_promisc) {
2794 promisc_m = ICE_PROMISC_VLAN_TX |
2795 ICE_PROMISC_VLAN_RX;
2796
2797 ice_clear_vsi_promisc(hw, vsi->idx,
2798 promisc_m, vid);
2799 }
2800 }
2801 }
2802
2803 error_param:
2804
2805 if (add_v)
2806 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_VLAN, v_ret,
2807 NULL, 0);
2808 else
2809 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DEL_VLAN, v_ret,
2810 NULL, 0);
2811 }
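The add path above enforces two admission rules per VLAN: the ID must be in range and untrusted VFs are capped. A minimal sketch of that rule, with SKETCH_MAX_VLANID and SKETCH_MAX_VLAN_PER_VF as hypothetical stand-ins for ICE_MAX_VLANID and ICE_MAX_VLAN_PER_VF:

#include <stdbool.h>
#include <stdint.h>

#define SKETCH_MAX_VLANID	4095	/* assumed 12-bit VLAN ID space */
#define SKETCH_MAX_VLAN_PER_VF	8	/* hypothetical per-VF cap */

static bool may_add_vf_vlan(uint16_t vid, bool trusted, uint16_t num_vlan)
{
	if (vid > SKETCH_MAX_VLANID)
		return false;	/* rejected with VIRTCHNL_STATUS_ERR_PARAM */
	if (!trusted && num_vlan >= SKETCH_MAX_VLAN_PER_VF)
		return false;	/* silently capped for untrusted VFs; reply is still success */
	return true;
}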
2812
2813 /**
2814 * ice_vc_add_vlan_msg
2815 * @vf: pointer to the VF info
2816 * @msg: pointer to the msg buffer
2817 *
2818 * Add and program guest VLAN ID
2819 */
2820 static int ice_vc_add_vlan_msg(struct ice_vf *vf, u8 *msg)
2821 {
2822 return ice_vc_process_vlan_msg(vf, msg, true);
2823 }
2824
2825 /**
2826 * ice_vc_remove_vlan_msg
2827 * @vf: pointer to the VF info
2828 * @msg: pointer to the msg buffer
2829 *
2830 * remove programmed guest VLAN ID
2831 */
2832 static int ice_vc_remove_vlan_msg(struct ice_vf *vf, u8 *msg)
2833 {
2834 return ice_vc_process_vlan_msg(vf, msg, false);
2835 }
2836
2837 /**
2838 * ice_vc_ena_vlan_stripping
2839 * @vf: pointer to the VF info
2840 *
2841 * Enable VLAN header stripping for a given VF
2842 */
2843 static int ice_vc_ena_vlan_stripping(struct ice_vf *vf)
2844 {
2845 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
2846 struct ice_pf *pf = vf->pf;
2847 struct ice_vsi *vsi;
2848
2849 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
2850 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2851 goto error_param;
2852 }
2853
2854 vsi = pf->vsi[vf->lan_vsi_idx];
2855 if (ice_vsi_manage_vlan_stripping(vsi, true))
2856 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2857
2858 error_param:
2859 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_VLAN_STRIPPING,
2860 v_ret, NULL, 0);
2861 }
2862
2863 /**
2864 * ice_vc_dis_vlan_stripping
2865 * @vf: pointer to the VF info
2866 *
2867 * Disable VLAN header stripping for a given VF
2868 */
2869 static int ice_vc_dis_vlan_stripping(struct ice_vf *vf)
2870 {
2871 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
2872 struct ice_pf *pf = vf->pf;
2873 struct ice_vsi *vsi;
2874
2875 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
2876 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2877 goto error_param;
2878 }
2879
2880 vsi = pf->vsi[vf->lan_vsi_idx];
2881 if (!vsi) {
2882 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2883 goto error_param;
2884 }
2885
2886 if (ice_vsi_manage_vlan_stripping(vsi, false))
2887 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2888
2889 error_param:
2890 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING,
2891 v_ret, NULL, 0);
2892 }
2893
2894 /**
2895 * ice_vc_process_vf_msg - Process request from VF
2896 * @pf: pointer to the PF structure
2897 * @event: pointer to the AQ event
2898 *
2899 * Called from the common asq/arq handler to
2900 * process request from VF
2901 */
2902 void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event)
2903 {
2904 u32 v_opcode = le32_to_cpu(event->desc.cookie_high);
2905 s16 vf_id = le16_to_cpu(event->desc.retval);
2906 u16 msglen = event->msg_len;
2907 u8 *msg = event->msg_buf;
2908 struct ice_vf *vf = NULL;
2909 int err = 0;
2910
2911 if (vf_id >= pf->num_alloc_vfs) {
2912 dev_err(&pf->pdev->dev, "Unable to locate VF for message from VF ID %d, opcode %d, len %d\n", vf_id, v_opcode, msglen);
2913 return;	/* no valid VF context to reply to, so don't go through error_handler */
2914 }
2915
2916 vf = &pf->vf[vf_id];
2917
2918 /* Check if VF is disabled */
2919 if (test_bit(ICE_VF_STATE_DIS, vf->vf_states)) {
2920 err = -EPERM;
2921 goto error_handler;
2922 }
2923
2924 /* Perform basic checks on the msg */
2925 err = virtchnl_vc_validate_vf_msg(&vf->vf_ver, v_opcode, msg, msglen);
2926 if (err) {
2927 if (err == VIRTCHNL_STATUS_ERR_PARAM)
2928 err = -EPERM;
2929 else
2930 err = -EINVAL;
2931 }
2932
2933 error_handler:
2934 if (err) {
2935 ice_vc_send_msg_to_vf(vf, v_opcode, VIRTCHNL_STATUS_ERR_PARAM,
2936 NULL, 0);
2937 dev_err(&pf->pdev->dev, "Invalid message from VF %d, opcode %d, len %d, error %d\n",
2938 vf_id, v_opcode, msglen, err);
2939 return;
2940 }
2941
2942 switch (v_opcode) {
2943 case VIRTCHNL_OP_VERSION:
2944 err = ice_vc_get_ver_msg(vf, msg);
2945 break;
2946 case VIRTCHNL_OP_GET_VF_RESOURCES:
2947 err = ice_vc_get_vf_res_msg(vf, msg);
2948 ice_vc_notify_vf_link_state(vf);
2949 break;
2950 case VIRTCHNL_OP_RESET_VF:
2951 ice_vc_reset_vf_msg(vf);
2952 break;
2953 case VIRTCHNL_OP_ADD_ETH_ADDR:
2954 err = ice_vc_add_mac_addr_msg(vf, msg);
2955 break;
2956 case VIRTCHNL_OP_DEL_ETH_ADDR:
2957 err = ice_vc_del_mac_addr_msg(vf, msg);
2958 break;
2959 case VIRTCHNL_OP_CONFIG_VSI_QUEUES:
2960 err = ice_vc_cfg_qs_msg(vf, msg);
2961 break;
2962 case VIRTCHNL_OP_ENABLE_QUEUES:
2963 err = ice_vc_ena_qs_msg(vf, msg);
2964 ice_vc_notify_vf_link_state(vf);
2965 break;
2966 case VIRTCHNL_OP_DISABLE_QUEUES:
2967 err = ice_vc_dis_qs_msg(vf, msg);
2968 break;
2969 case VIRTCHNL_OP_REQUEST_QUEUES:
2970 err = ice_vc_request_qs_msg(vf, msg);
2971 break;
2972 case VIRTCHNL_OP_CONFIG_IRQ_MAP:
2973 err = ice_vc_cfg_irq_map_msg(vf, msg);
2974 break;
2975 case VIRTCHNL_OP_CONFIG_RSS_KEY:
2976 err = ice_vc_config_rss_key(vf, msg);
2977 break;
2978 case VIRTCHNL_OP_CONFIG_RSS_LUT:
2979 err = ice_vc_config_rss_lut(vf, msg);
2980 break;
2981 case VIRTCHNL_OP_GET_STATS:
2982 err = ice_vc_get_stats_msg(vf, msg);
2983 break;
2984 case VIRTCHNL_OP_ADD_VLAN:
2985 err = ice_vc_add_vlan_msg(vf, msg);
2986 break;
2987 case VIRTCHNL_OP_DEL_VLAN:
2988 err = ice_vc_remove_vlan_msg(vf, msg);
2989 break;
2990 case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING:
2991 err = ice_vc_ena_vlan_stripping(vf);
2992 break;
2993 case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING:
2994 err = ice_vc_dis_vlan_stripping(vf);
2995 break;
2996 case VIRTCHNL_OP_UNKNOWN:
2997 default:
2998 dev_err(&pf->pdev->dev, "Unsupported opcode %d from VF %d\n",
2999 v_opcode, vf_id);
3000 err = ice_vc_send_msg_to_vf(vf, v_opcode,
3001 VIRTCHNL_STATUS_ERR_NOT_SUPPORTED,
3002 NULL, 0);
3003 break;
3004 }
3005 if (err) {
3006 /* No recovery action is possible at this point; just log
3007 * the failure and move on
3008 */
3009 dev_info(&pf->pdev->dev,
3010 "PF failed to honor VF %d, opcode %d, error %d\n",
3011 vf_id, v_opcode, err);
3012 }
3013 }
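The first few lines of the handler recover the virtchnl opcode from cookie_high and the source VF ID from the descriptor's retval field, both little-endian on the wire. A simplified standalone version of that decode is sketched below; the struct and helpers are hypothetical stand-ins for the driver's descriptor type and for le16_to_cpu/le32_to_cpu.

#include <stdint.h>

struct aq_desc_sketch {
	uint8_t retval_le[2];		/* little-endian VF ID */
	uint8_t cookie_high_le[4];	/* little-endian virtchnl opcode */
};

static uint16_t get_le16(const uint8_t *b)
{
	return (uint16_t)(b[0] | (b[1] << 8));
}

static uint32_t get_le32(const uint8_t *b)
{
	return (uint32_t)b[0] | ((uint32_t)b[1] << 8) |
	       ((uint32_t)b[2] << 16) | ((uint32_t)b[3] << 24);
}

static void decode_vf_event(const struct aq_desc_sketch *d,
			    uint32_t *v_opcode, uint16_t *vf_id)
{
	*v_opcode = get_le32(d->cookie_high_le);
	*vf_id = get_le16(d->retval_le);
}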
3014
3015 /**
3016 * ice_get_vf_cfg
3017 * @netdev: network interface device structure
3018 * @vf_id: VF identifier
3019 * @ivi: VF configuration structure
3020 *
3021 * return VF configuration
3022 */
3023 int
3024 ice_get_vf_cfg(struct net_device *netdev, int vf_id, struct ifla_vf_info *ivi)
3025 {
3026 struct ice_netdev_priv *np = netdev_priv(netdev);
3027 struct ice_vsi *vsi = np->vsi;
3028 struct ice_pf *pf = vsi->back;
3029 struct ice_vf *vf;
3030
3031
3032 if (vf_id >= pf->num_alloc_vfs) {
3033 netdev_err(netdev, "invalid VF id: %d\n", vf_id);
3034 return -EINVAL;
3035 }
3036
3037 vf = &pf->vf[vf_id];
3038 vsi = pf->vsi[vf->lan_vsi_idx];
3039
3040 if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states)) {
3041 netdev_err(netdev, "VF %d in reset. Try again.\n", vf_id);
3042 return -EBUSY;
3043 }
3044
3045 ivi->vf = vf_id;
3046 ether_addr_copy(ivi->mac, vf->dflt_lan_addr.addr);
3047
3048 /* VF configuration for VLAN and applicable QoS */
3049 ivi->vlan = le16_to_cpu(vsi->info.pvid) & ICE_VLAN_M;
3050 ivi->qos = (le16_to_cpu(vsi->info.pvid) & ICE_PRIORITY_M) >>
3051 ICE_VLAN_PRIORITY_S;
3052
3053 ivi->trusted = vf->trusted;
3054 ivi->spoofchk = vf->spoofchk;
3055 if (!vf->link_forced)
3056 ivi->linkstate = IFLA_VF_LINK_STATE_AUTO;
3057 else if (vf->link_up)
3058 ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE;
3059 else
3060 ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE;
3061 ivi->max_tx_rate = vf->tx_rate;
3062 ivi->min_tx_rate = 0;
3063 return 0;
3064 }
3065
3066 /**
3067 * ice_set_vf_spoofchk
3068 * @netdev: network interface device structure
3069 * @vf_id: VF identifier
3070 * @ena: flag to enable or disable feature
3071 *
3072 * Enable or disable VF spoof checking
3073 */
3074 int ice_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool ena)
3075 {
3076 struct ice_netdev_priv *np = netdev_priv(netdev);
3077 struct ice_vsi *vsi = np->vsi;
3078 struct ice_pf *pf = vsi->back;
3079 struct ice_vsi_ctx *ctx;
3080 enum ice_status status;
3081 struct ice_vf *vf;
3082 int ret = 0;
3083
3084
3085 if (vf_id >= pf->num_alloc_vfs) {
3086 netdev_err(netdev, "invalid VF id: %d\n", vf_id);
3087 return -EINVAL;
3088 }
3089
3090 vf = &pf->vf[vf_id];
3091 if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states)) {
3092 netdev_err(netdev, "VF %d in reset. Try again.\n", vf_id);
3093 return -EBUSY;
3094 }
3095
3096 if (ena == vf->spoofchk) {
3097 dev_dbg(&pf->pdev->dev, "VF spoofchk already %s\n",
3098 ena ? "ON" : "OFF");
3099 return 0;
3100 }
3101
3102 ctx = devm_kzalloc(&pf->pdev->dev, sizeof(*ctx), GFP_KERNEL);
3103 if (!ctx)
3104 return -ENOMEM;
3105
3106 ctx->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID);
3107
3108 if (ena) {
3109 ctx->info.sec_flags |= ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF;
3110 ctx->info.sw_flags2 |= ICE_AQ_VSI_SW_FLAG_RX_PRUNE_EN_M;
3111 }
3112
3113 status = ice_update_vsi(&pf->hw, vsi->idx, ctx, NULL);
3114 if (status) {
3115 dev_dbg(&pf->pdev->dev,
3116 "Error %d, failed to update VSI parameters\n", status);
3117 ret = -EIO;
3118 goto out;
3119 }
3120
3121 vf->spoofchk = ena;
3122 vsi->info.sec_flags = ctx->info.sec_flags;
3123 vsi->info.sw_flags2 = ctx->info.sw_flags2;
3124 out:
3125 devm_kfree(&pf->pdev->dev, ctx);
3126 return ret;
3127 }
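The spoof-check update above follows a common VSI-update pattern: allocate a zeroed context, mark only the section being changed as valid, and set the flags only when enabling, so a disable writes zeroed security flags. A sketch of that pattern, with the constants below as hypothetical stand-ins for the ICE_AQ_VSI_* defines:

#include <stdbool.h>
#include <stdint.h>
#include <string.h>

#define SKETCH_SEC_VALID	0x0001	/* stand-in for the security-section-valid bit */
#define SKETCH_SEC_ANTI_SPOOF	0x04	/* stand-in for the MAC anti-spoof enable flag */
#define SKETCH_SW2_RX_PRUNE	0x01	/* stand-in for the Rx VLAN prune enable mask */

struct vsi_ctx_sketch {
	uint16_t valid_sections;
	uint8_t sec_flags;
	uint8_t sw_flags2;
};

static void build_spoofchk_ctx(struct vsi_ctx_sketch *ctx, bool ena)
{
	memset(ctx, 0, sizeof(*ctx));
	ctx->valid_sections = SKETCH_SEC_VALID;	/* only the security section is updated */
	if (ena) {
		ctx->sec_flags |= SKETCH_SEC_ANTI_SPOOF;
		ctx->sw_flags2 |= SKETCH_SW2_RX_PRUNE;
	}
	/* with ena false the zeroed flags are what gets written by the update */
}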
3128
3129 /**
3130 * ice_set_vf_mac
3131 * @netdev: network interface device structure
3132 * @vf_id: VF identifier
3133 * @mac: MAC address
3134 *
3135 * program VF MAC address
3136 */
3137 int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
3138 {
3139 struct ice_netdev_priv *np = netdev_priv(netdev);
3140 struct ice_vsi *vsi = np->vsi;
3141 struct ice_pf *pf = vsi->back;
3142 struct ice_vf *vf;
3143 int ret = 0;
3144
3145
3146 if (vf_id >= pf->num_alloc_vfs) {
3147 netdev_err(netdev, "invalid VF id: %d\n", vf_id);
3148 return -EINVAL;
3149 }
3150
3151 vf = &pf->vf[vf_id];
3152 if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states)) {
3153 netdev_err(netdev, "VF %d in reset. Try again.\n", vf_id);
3154 return -EBUSY;
3155 }
3156
3157 if (is_zero_ether_addr(mac) || is_multicast_ether_addr(mac)) {
3158 netdev_err(netdev, "%pM not a valid unicast address\n", mac);
3159 return -EINVAL;
3160 }
3161
3162 /* copy MAC into dflt_lan_addr and trigger a VF reset. The reset
3163 * flow will use the updated dflt_lan_addr and add a MAC filter
3164 * using ice_add_mac. Also set pf_set_mac to indicate that the PF
3165 * has set the MAC address for this VF
3166 */
3167 ether_addr_copy(vf->dflt_lan_addr.addr, mac);
3168 vf->pf_set_mac = true;
3169 netdev_info(netdev,
3170 "MAC on VF %d set to %pM. VF driver will be reinitialized\n",
3171 vf_id, mac);
3172
3173 ice_vc_dis_vf(vf);
3174 return ret;
3175 }
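The MAC accepted by ice_set_vf_mac() must be a non-zero unicast address. Below is a standalone restatement of that rule, intended to mirror what is_zero_ether_addr() and is_multicast_ether_addr() test (multicast addresses have the least-significant bit of the first octet set, which also covers broadcast).

#include <stdbool.h>
#include <stdint.h>
#include <string.h>

static bool is_valid_vf_mac(const uint8_t m[6])
{
	static const uint8_t zero[6];

	if (memcmp(m, zero, 6) == 0)
		return false;		/* all-zero address */
	if (m[0] & 0x01)
		return false;		/* multicast (includes broadcast) */
	return true;
}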
3176
3177 /**
3178 * ice_set_vf_trust
3179 * @netdev: network interface device structure
3180 * @vf_id: VF identifier
3181 * @trusted: Boolean value to enable/disable trusted VF
3182 *
3183 * Enable or disable a given VF as trusted
3184 */
3185 int ice_set_vf_trust(struct net_device *netdev, int vf_id, bool trusted)
3186 {
3187 struct ice_netdev_priv *np = netdev_priv(netdev);
3188 struct ice_vsi *vsi = np->vsi;
3189 struct ice_pf *pf = vsi->back;
3190 struct ice_vf *vf;
3191
3192
3193 if (vf_id >= pf->num_alloc_vfs) {
3194 dev_err(&pf->pdev->dev, "invalid VF id: %d\n", vf_id);
3195 return -EINVAL;
3196 }
3197
3198 vf = &pf->vf[vf_id];
3199 if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states)) {
3200 dev_err(&pf->pdev->dev, "VF %d in reset. Try again.\n", vf_id);
3201 return -EBUSY;
3202 }
3203
3204
3205 if (trusted == vf->trusted)
3206 return 0;
3207
3208 vf->trusted = trusted;
3209 ice_vc_dis_vf(vf);
3210 dev_info(&pf->pdev->dev, "VF %u is now %strusted\n",
3211 vf_id, trusted ? "" : "un");
3212
3213 return 0;
3214 }
3215
3216 /**
3217 * ice_set_vf_link_state
3218 * @netdev: network interface device structure
3219 * @vf_id: VF identifier
3220 * @link_state: required link state
3221 *
3222 * Set VF's link state, irrespective of physical link state status
3223 */
3224 int ice_set_vf_link_state(struct net_device *netdev, int vf_id, int link_state)
3225 {
3226 struct ice_netdev_priv *np = netdev_priv(netdev);
3227 struct ice_pf *pf = np->vsi->back;
3228 struct virtchnl_pf_event pfe = { 0 };
3229 struct ice_link_status *ls;
3230 struct ice_vf *vf;
3231 struct ice_hw *hw;
3232
3233 if (vf_id >= pf->num_alloc_vfs) {
3234 dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
3235 return -EINVAL;
3236 }
3237
3238 vf = &pf->vf[vf_id];
3239 hw = &pf->hw;
3240 ls = &pf->hw.port_info->phy.link_info;
3241
3242 if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states)) {
3243 dev_err(&pf->pdev->dev, "VF %d in reset. Try again.\n", vf_id);
3244 return -EBUSY;
3245 }
3246
3247 pfe.event = VIRTCHNL_EVENT_LINK_CHANGE;
3248 pfe.severity = PF_EVENT_SEVERITY_INFO;
3249
3250 switch (link_state) {
3251 case IFLA_VF_LINK_STATE_AUTO:
3252 vf->link_forced = false;
3253 vf->link_up = ls->link_info & ICE_AQ_LINK_UP;
3254 break;
3255 case IFLA_VF_LINK_STATE_ENABLE:
3256 vf->link_forced = true;
3257 vf->link_up = true;
3258 break;
3259 case IFLA_VF_LINK_STATE_DISABLE:
3260 vf->link_forced = true;
3261 vf->link_up = false;
3262 break;
3263 default:
3264 return -EINVAL;
3265 }
3266
3267 if (vf->link_forced)
3268 ice_set_pfe_link_forced(vf, &pfe, vf->link_up);
3269 else
3270 ice_set_pfe_link(vf, &pfe, ls->link_speed, vf->link_up);
3271
3272 /* Notify the VF of its new link state */
3273 ice_aq_send_msg_to_vf(hw, vf->vf_id, VIRTCHNL_OP_EVENT,
3274 VIRTCHNL_STATUS_SUCCESS, (u8 *)&pfe,
3275 sizeof(pfe), NULL);
3276
3277 return 0;
3278 }
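The switch above is the whole policy for the VF link-state ndo: "auto" tracks the physical port, while enable/disable force the value reported to the VF. A compact sketch, with hypothetical enum values standing in for IFLA_VF_LINK_STATE_*:

#include <stdbool.h>

enum vf_link_req { LINK_AUTO, LINK_ENABLE, LINK_DISABLE };	/* stand-ins for IFLA_VF_LINK_STATE_* */

struct vf_link_sketch {
	bool forced;
	bool up;
};

static int apply_link_request(struct vf_link_sketch *l, enum vf_link_req req, bool phys_link_up)
{
	switch (req) {
	case LINK_AUTO:
		l->forced = false;
		l->up = phys_link_up;	/* follow the physical port */
		return 0;
	case LINK_ENABLE:
		l->forced = true;
		l->up = true;
		return 0;
	case LINK_DISABLE:
		l->forced = true;
		l->up = false;
		return 0;
	}
	return -1;			/* unknown request, mirrors the -EINVAL above */
}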