This source file includes the following definitions:
- i40e_vc_vf_broadcast
- i40e_vc_notify_vf_link_state
- i40e_vc_notify_link_state
- i40e_vc_notify_reset
- i40e_vc_notify_vf_reset
- i40e_vc_disable_vf
- i40e_vc_isvalid_vsi_id
- i40e_vc_isvalid_queue_id
- i40e_vc_isvalid_vector_id
- i40e_vc_get_pf_queue_id
- i40e_get_real_pf_qid
- i40e_config_irq_link_list
- i40e_release_iwarp_qvlist
- i40e_config_iwarp_qvlist
- i40e_config_vsi_tx_queue
- i40e_config_vsi_rx_queue
- i40e_alloc_vsi_res
- i40e_map_pf_queues_to_vsi
- i40e_map_pf_to_vf_queues
- i40e_enable_vf_mappings
- i40e_disable_vf_mappings
- i40e_free_vf_res
- i40e_alloc_vf_res
- i40e_quiesce_vf_pci
- i40e_config_vf_promiscuous_mode
- i40e_trigger_vf_reset
- i40e_cleanup_reset_vf
- i40e_reset_vf
- i40e_reset_all_vfs
- i40e_free_vfs
- i40e_alloc_vfs
- i40e_pci_sriov_enable
- i40e_pci_sriov_configure
- i40e_vc_send_msg_to_vf
- i40e_vc_send_resp_to_vf
- i40e_vc_get_version_msg
- i40e_del_qch
- i40e_vc_get_vf_resources_msg
- i40e_vc_reset_vf_msg
- i40e_getnum_vf_vsi_vlan_filters
- i40e_vc_config_promiscuous_mode_msg
- i40e_vc_config_queues_msg
- i40e_validate_queue_map
- i40e_vc_config_irq_map_msg
- i40e_ctrl_vf_tx_rings
- i40e_ctrl_vf_rx_rings
- i40e_vc_validate_vqs_bitmaps
- i40e_vc_enable_queues_msg
- i40e_vc_disable_queues_msg
- i40e_vc_request_queues_msg
- i40e_vc_get_stats_msg
- i40e_check_vf_permission
- i40e_vc_add_mac_addr_msg
- i40e_vc_del_mac_addr_msg
- i40e_vc_add_vlan_msg
- i40e_vc_remove_vlan_msg
- i40e_vc_iwarp_msg
- i40e_vc_iwarp_qvmap_msg
- i40e_vc_config_rss_key
- i40e_vc_config_rss_lut
- i40e_vc_get_rss_hena
- i40e_vc_set_rss_hena
- i40e_vc_enable_vlan_stripping
- i40e_vc_disable_vlan_stripping
- i40e_validate_cloud_filter
- i40e_find_vsi_from_seid
- i40e_del_all_cloud_filters
- i40e_vc_del_cloud_filter
- i40e_vc_add_cloud_filter
- i40e_vc_add_qch_msg
- i40e_vc_del_qch_msg
- i40e_vc_process_vf_msg
- i40e_vc_process_vflr_event
- i40e_validate_vf
- i40e_ndo_set_vf_mac
- i40e_vsi_has_vlans
- i40e_ndo_set_vf_port_vlan
- i40e_ndo_set_vf_bw
- i40e_ndo_get_vf_config
- i40e_ndo_set_vf_link_state
- i40e_ndo_set_vf_spoofchk
- i40e_ndo_set_vf_trust
1
2
3
4 #include "i40e.h"
5
6
7
8
9
10
11
12
13
14
15
16
17
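/* Reconstructed descriptive comment (the original kernel-doc was stripped).
 *
 * i40e_vc_vf_broadcast
 * @pf: pointer to the PF structure
 * @v_opcode: operation code
 * @v_retval: return value
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * send a message to all VFs on a given PF
 */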
18 static void i40e_vc_vf_broadcast(struct i40e_pf *pf,
19 enum virtchnl_ops v_opcode,
20 i40e_status v_retval, u8 *msg,
21 u16 msglen)
22 {
23 struct i40e_hw *hw = &pf->hw;
24 struct i40e_vf *vf = pf->vf;
25 int i;
26
27 for (i = 0; i < pf->num_alloc_vfs; i++, vf++) {
28 int abs_vf_id = vf->vf_id + (int)hw->func_caps.vf_base_id;
29
30 if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states) &&
31 !test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states))
32 continue;
33
34
35
36
37 i40e_aq_send_msg_to_vf(hw, abs_vf_id, v_opcode, v_retval,
38 msg, msglen, NULL);
39 }
40 }
41
42
43
44
45
46
47
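/* i40e_vc_notify_vf_link_state
 * @vf: pointer to the VF structure
 *
 * send a link status message to a single VF
 */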
48 static void i40e_vc_notify_vf_link_state(struct i40e_vf *vf)
49 {
50 struct virtchnl_pf_event pfe;
51 struct i40e_pf *pf = vf->pf;
52 struct i40e_hw *hw = &pf->hw;
53 struct i40e_link_status *ls = &pf->hw.phy.link_info;
54 int abs_vf_id = vf->vf_id + (int)hw->func_caps.vf_base_id;
55
56 pfe.event = VIRTCHNL_EVENT_LINK_CHANGE;
57 pfe.severity = PF_EVENT_SEVERITY_INFO;
58
59
60 if (!vf->queues_enabled) {
61 pfe.event_data.link_event.link_status = false;
62 pfe.event_data.link_event.link_speed = 0;
63 } else if (vf->link_forced) {
64 pfe.event_data.link_event.link_status = vf->link_up;
65 pfe.event_data.link_event.link_speed =
66 (vf->link_up ? VIRTCHNL_LINK_SPEED_40GB : 0);
67 } else {
68 pfe.event_data.link_event.link_status =
69 ls->link_info & I40E_AQ_LINK_UP;
70 pfe.event_data.link_event.link_speed =
71 i40e_virtchnl_link_speed(ls->link_speed);
72 }
73
74 i40e_aq_send_msg_to_vf(hw, abs_vf_id, VIRTCHNL_OP_EVENT,
75 0, (u8 *)&pfe, sizeof(pfe), NULL);
76 }
77
78
79
80
81
82
83
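/* i40e_vc_notify_link_state
 * @pf: pointer to the PF structure
 *
 * send a link status message to all VFs on a given PF
 */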
84 void i40e_vc_notify_link_state(struct i40e_pf *pf)
85 {
86 int i;
87
88 for (i = 0; i < pf->num_alloc_vfs; i++)
89 i40e_vc_notify_vf_link_state(&pf->vf[i]);
90 }
91
92
93
94
95
96
97
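/* i40e_vc_notify_reset
 * @pf: pointer to the PF structure
 *
 * indicate a pending reset to all VFs on a given PF
 */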
98 void i40e_vc_notify_reset(struct i40e_pf *pf)
99 {
100 struct virtchnl_pf_event pfe;
101
102 pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
103 pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
104 i40e_vc_vf_broadcast(pf, VIRTCHNL_OP_EVENT, 0,
105 (u8 *)&pfe, sizeof(struct virtchnl_pf_event));
106 }
107
108
109
110
111
112
113
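/* i40e_vc_notify_vf_reset
 * @vf: pointer to the VF structure
 *
 * indicate a pending reset to the given VF
 */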
114 void i40e_vc_notify_vf_reset(struct i40e_vf *vf)
115 {
116 struct virtchnl_pf_event pfe;
117 int abs_vf_id;
118
119
120 if (!vf || vf->vf_id >= vf->pf->num_alloc_vfs)
121 return;
122
123
124 if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states) &&
125 !test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states))
126 return;
127
128 abs_vf_id = vf->vf_id + (int)vf->pf->hw.func_caps.vf_base_id;
129
130 pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
131 pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
132 i40e_aq_send_msg_to_vf(&vf->pf->hw, abs_vf_id, VIRTCHNL_OP_EVENT,
133 0, (u8 *)&pfe,
134 sizeof(struct virtchnl_pf_event), NULL);
135 }
136
137
138
139
140
141
142
143
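/* i40e_vc_disable_vf
 * @vf: pointer to the VF info
 *
 * Disable the VF through a SW reset.  Retries initiating the reset for up
 * to 20 attempts, roughly 10-20 ms apart, and warns if the reset could not
 * be started in that window.
 */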
144 static inline void i40e_vc_disable_vf(struct i40e_vf *vf)
145 {
146 int i;
147
148 i40e_vc_notify_vf_reset(vf);
149
150
151
152
153
154
155 for (i = 0; i < 20; i++) {
156 if (i40e_reset_vf(vf, false))
157 return;
158 usleep_range(10000, 20000);
159 }
160
161 dev_warn(&vf->pf->pdev->dev,
162 "Failed to initiate reset for VF %d after 200 milliseconds\n",
163 vf->vf_id);
164 }
165
166
167
168
169
170
171
172
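/* i40e_vc_isvalid_vsi_id
 * @vf: pointer to the VF info
 * @vsi_id: VF relative VSI id
 *
 * check for a valid VSI id owned by this VF
 */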
173 static inline bool i40e_vc_isvalid_vsi_id(struct i40e_vf *vf, u16 vsi_id)
174 {
175 struct i40e_pf *pf = vf->pf;
176 struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id);
177
178 return (vsi && (vsi->vf_id == vf->vf_id));
179 }
180
181
182
183
184
185
186
187
188
189 static inline bool i40e_vc_isvalid_queue_id(struct i40e_vf *vf, u16 vsi_id,
190 u16 qid)
191 {
192 struct i40e_pf *pf = vf->pf;
193 struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id);
194
195 return (vsi && (qid < vsi->alloc_queue_pairs));
196 }
197
198
199
200
201
202
203
204
205 static inline bool i40e_vc_isvalid_vector_id(struct i40e_vf *vf, u32 vector_id)
206 {
207 struct i40e_pf *pf = vf->pf;
208
209 return vector_id < pf->hw.func_caps.num_msix_vectors_vf;
210 }
211
212
213
214
215
216
217
218
219
220
221
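/* i40e_vc_get_pf_queue_id
 * @vf: pointer to the VF info
 * @vsi_id: id of VSI as provided by the FW
 * @vsi_queue_id: VSI relative queue id
 *
 * return PF relative queue id
 */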
222 static u16 i40e_vc_get_pf_queue_id(struct i40e_vf *vf, u16 vsi_id,
223 u8 vsi_queue_id)
224 {
225 struct i40e_pf *pf = vf->pf;
226 struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id);
227 u16 pf_queue_id = I40E_QUEUE_END_OF_LIST;
228
229 if (!vsi)
230 return pf_queue_id;
231
232 if (le16_to_cpu(vsi->info.mapping_flags) &
233 I40E_AQ_VSI_QUE_MAP_NONCONTIG)
234 pf_queue_id =
235 le16_to_cpu(vsi->info.queue_mapping[vsi_queue_id]);
236 else
237 pf_queue_id = le16_to_cpu(vsi->info.queue_mapping[0]) +
238 vsi_queue_id;
239
240 return pf_queue_id;
241 }
242
243
244
245
246
247
248
249
250
251 static u16 i40e_get_real_pf_qid(struct i40e_vf *vf, u16 vsi_id, u16 queue_id)
252 {
253 int i;
254
255 if (vf->adq_enabled) {
256
257
258
259
260 for (i = 0; i < vf->num_tc; i++) {
261 if (queue_id < vf->ch[i].num_qps) {
262 vsi_id = vf->ch[i].vsi_id;
263 break;
264 }
265
266
267
268 queue_id -= vf->ch[i].num_qps;
269 }
270 }
271
272 return i40e_vc_get_pf_queue_id(vf, vsi_id, queue_id);
273 }
274
275
276
277
278
279
280
281
282
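/* i40e_config_irq_link_list
 * @vf: pointer to the VF info
 * @vsi_id: id of VSI as given by the FW
 * @vecmap: irq map info
 *
 * configure the irq link list from the map
 */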
283 static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_id,
284 struct virtchnl_vector_map *vecmap)
285 {
286 unsigned long linklistmap = 0, tempmap;
287 struct i40e_pf *pf = vf->pf;
288 struct i40e_hw *hw = &pf->hw;
289 u16 vsi_queue_id, pf_queue_id;
290 enum i40e_queue_type qtype;
291 u16 next_q, vector_id, size;
292 u32 reg, reg_idx;
293 u16 itr_idx = 0;
294
295 vector_id = vecmap->vector_id;
296
297 if (0 == vector_id)
298 reg_idx = I40E_VPINT_LNKLST0(vf->vf_id);
299 else
300 reg_idx = I40E_VPINT_LNKLSTN(
301 ((pf->hw.func_caps.num_msix_vectors_vf - 1) * vf->vf_id) +
302 (vector_id - 1));
303
304 if (vecmap->rxq_map == 0 && vecmap->txq_map == 0) {
305
306 wr32(hw, reg_idx, I40E_VPINT_LNKLST0_FIRSTQ_INDX_MASK);
307 goto irq_list_done;
308 }
309 tempmap = vecmap->rxq_map;
310 for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) {
311 linklistmap |= (BIT(I40E_VIRTCHNL_SUPPORTED_QTYPES *
312 vsi_queue_id));
313 }
314
315 tempmap = vecmap->txq_map;
316 for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) {
317 linklistmap |= (BIT(I40E_VIRTCHNL_SUPPORTED_QTYPES *
318 vsi_queue_id + 1));
319 }
320
321 size = I40E_MAX_VSI_QP * I40E_VIRTCHNL_SUPPORTED_QTYPES;
322 next_q = find_first_bit(&linklistmap, size);
323 if (unlikely(next_q == size))
324 goto irq_list_done;
325
326 vsi_queue_id = next_q / I40E_VIRTCHNL_SUPPORTED_QTYPES;
327 qtype = next_q % I40E_VIRTCHNL_SUPPORTED_QTYPES;
328 pf_queue_id = i40e_get_real_pf_qid(vf, vsi_id, vsi_queue_id);
329 reg = ((qtype << I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT) | pf_queue_id);
330
331 wr32(hw, reg_idx, reg);
332
333 while (next_q < size) {
334 switch (qtype) {
335 case I40E_QUEUE_TYPE_RX:
336 reg_idx = I40E_QINT_RQCTL(pf_queue_id);
337 itr_idx = vecmap->rxitr_idx;
338 break;
339 case I40E_QUEUE_TYPE_TX:
340 reg_idx = I40E_QINT_TQCTL(pf_queue_id);
341 itr_idx = vecmap->txitr_idx;
342 break;
343 default:
344 break;
345 }
346
347 next_q = find_next_bit(&linklistmap, size, next_q + 1);
348 if (next_q < size) {
349 vsi_queue_id = next_q / I40E_VIRTCHNL_SUPPORTED_QTYPES;
350 qtype = next_q % I40E_VIRTCHNL_SUPPORTED_QTYPES;
351 pf_queue_id = i40e_get_real_pf_qid(vf,
352 vsi_id,
353 vsi_queue_id);
354 } else {
355 pf_queue_id = I40E_QUEUE_END_OF_LIST;
356 qtype = 0;
357 }
358
359
360 reg = (vector_id) |
361 (qtype << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
362 (pf_queue_id << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
363 BIT(I40E_QINT_RQCTL_CAUSE_ENA_SHIFT) |
364 (itr_idx << I40E_QINT_RQCTL_ITR_INDX_SHIFT);
365 wr32(hw, reg_idx, reg);
366 }
367
368
369
370
371 if ((vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RX_POLLING) &&
372 (vector_id == 0)) {
373 reg = rd32(hw, I40E_GLINT_CTL);
374 if (!(reg & I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK)) {
375 reg |= I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK;
376 wr32(hw, I40E_GLINT_CTL, reg);
377 }
378 }
379
380 irq_list_done:
381 i40e_flush(hw);
382 }
383
384
385
386
387
388
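/* i40e_release_iwarp_qvlist
 * @vf: pointer to the VF
 *
 * unmap the iwarp queue vectors and free the stored qvlist info
 */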
389 static void i40e_release_iwarp_qvlist(struct i40e_vf *vf)
390 {
391 struct i40e_pf *pf = vf->pf;
392 struct virtchnl_iwarp_qvlist_info *qvlist_info = vf->qvlist_info;
393 u32 msix_vf;
394 u32 i;
395
396 if (!vf->qvlist_info)
397 return;
398
399 msix_vf = pf->hw.func_caps.num_msix_vectors_vf;
400 for (i = 0; i < qvlist_info->num_vectors; i++) {
401 struct virtchnl_iwarp_qv_info *qv_info;
402 u32 next_q_index, next_q_type;
403 struct i40e_hw *hw = &pf->hw;
404 u32 v_idx, reg_idx, reg;
405
406 qv_info = &qvlist_info->qv_info[i];
407 if (!qv_info)
408 continue;
409 v_idx = qv_info->v_idx;
410 if (qv_info->ceq_idx != I40E_QUEUE_INVALID_IDX) {
411
412
413
414 reg_idx = (msix_vf - 1) * vf->vf_id + qv_info->ceq_idx;
415 reg = rd32(hw, I40E_VPINT_CEQCTL(reg_idx));
416 next_q_index = (reg & I40E_VPINT_CEQCTL_NEXTQ_INDX_MASK)
417 >> I40E_VPINT_CEQCTL_NEXTQ_INDX_SHIFT;
418 next_q_type = (reg & I40E_VPINT_CEQCTL_NEXTQ_TYPE_MASK)
419 >> I40E_VPINT_CEQCTL_NEXTQ_TYPE_SHIFT;
420
421 reg_idx = ((msix_vf - 1) * vf->vf_id) + (v_idx - 1);
422 reg = (next_q_index &
423 I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK) |
424 (next_q_type <<
425 I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT);
426
427 wr32(hw, I40E_VPINT_LNKLSTN(reg_idx), reg);
428 }
429 }
430 kfree(vf->qvlist_info);
431 vf->qvlist_info = NULL;
432 }
433
434
435
436
437
438
439
440
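/* i40e_config_iwarp_qvlist
 * @vf: pointer to the VF info
 * @qvlist_info: queue and vector list from the VF
 *
 * Return 0 on success or negative error value.
 */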
441 static int i40e_config_iwarp_qvlist(struct i40e_vf *vf,
442 struct virtchnl_iwarp_qvlist_info *qvlist_info)
443 {
444 struct i40e_pf *pf = vf->pf;
445 struct i40e_hw *hw = &pf->hw;
446 struct virtchnl_iwarp_qv_info *qv_info;
447 u32 v_idx, i, reg_idx, reg;
448 u32 next_q_idx, next_q_type;
449 u32 msix_vf;
450 int ret = 0;
451
452 msix_vf = pf->hw.func_caps.num_msix_vectors_vf;
453
454 if (qvlist_info->num_vectors > msix_vf) {
455 dev_warn(&pf->pdev->dev,
456 "Incorrect number of iwarp vectors %u. Maximum %u allowed.\n",
457 qvlist_info->num_vectors,
458 msix_vf);
459 ret = -EINVAL;
460 goto err_out;
461 }
462
463 kfree(vf->qvlist_info);
464 vf->qvlist_info = kzalloc(struct_size(vf->qvlist_info, qv_info,
465 qvlist_info->num_vectors - 1),
466 GFP_KERNEL);
467 if (!vf->qvlist_info) {
468 ret = -ENOMEM;
469 goto err_out;
470 }
471 vf->qvlist_info->num_vectors = qvlist_info->num_vectors;
472
473 msix_vf = pf->hw.func_caps.num_msix_vectors_vf;
474 for (i = 0; i < qvlist_info->num_vectors; i++) {
475 qv_info = &qvlist_info->qv_info[i];
476 if (!qv_info)
477 continue;
478
479
480 if (!i40e_vc_isvalid_vector_id(vf, qv_info->v_idx)) {
481 ret = -EINVAL;
482 goto err_free;
483 }
484
485 v_idx = qv_info->v_idx;
486
487 vf->qvlist_info->qv_info[i] = *qv_info;
488
489 reg_idx = ((msix_vf - 1) * vf->vf_id) + (v_idx - 1);
490
491
492
493
494 reg = rd32(hw, I40E_VPINT_LNKLSTN(reg_idx));
495 next_q_idx = ((reg & I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK) >>
496 I40E_VPINT_LNKLSTN_FIRSTQ_INDX_SHIFT);
497 next_q_type = ((reg & I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK) >>
498 I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT);
499
500 if (qv_info->ceq_idx != I40E_QUEUE_INVALID_IDX) {
501 reg_idx = (msix_vf - 1) * vf->vf_id + qv_info->ceq_idx;
502 reg = (I40E_VPINT_CEQCTL_CAUSE_ENA_MASK |
503 (v_idx << I40E_VPINT_CEQCTL_MSIX_INDX_SHIFT) |
504 (qv_info->itr_idx << I40E_VPINT_CEQCTL_ITR_INDX_SHIFT) |
505 (next_q_type << I40E_VPINT_CEQCTL_NEXTQ_TYPE_SHIFT) |
506 (next_q_idx << I40E_VPINT_CEQCTL_NEXTQ_INDX_SHIFT));
507 wr32(hw, I40E_VPINT_CEQCTL(reg_idx), reg);
508
509 reg_idx = ((msix_vf - 1) * vf->vf_id) + (v_idx - 1);
510 reg = (qv_info->ceq_idx &
511 I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK) |
512 (I40E_QUEUE_TYPE_PE_CEQ <<
513 I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT);
514 wr32(hw, I40E_VPINT_LNKLSTN(reg_idx), reg);
515 }
516
517 if (qv_info->aeq_idx != I40E_QUEUE_INVALID_IDX) {
518 reg = (I40E_VPINT_AEQCTL_CAUSE_ENA_MASK |
519 (v_idx << I40E_VPINT_AEQCTL_MSIX_INDX_SHIFT) |
520 (qv_info->itr_idx << I40E_VPINT_AEQCTL_ITR_INDX_SHIFT));
521
522 wr32(hw, I40E_VPINT_AEQCTL(vf->vf_id), reg);
523 }
524 }
525
526 return 0;
527 err_free:
528 kfree(vf->qvlist_info);
529 vf->qvlist_info = NULL;
530 err_out:
531 return ret;
532 }
533
534
535
536
537
538
539
540
541
542
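/* i40e_config_vsi_tx_queue
 * @vf: pointer to the VF info
 * @vsi_id: id of VSI as provided by the FW
 * @vsi_queue_id: VSI relative queue index
 * @info: config info from the VF
 *
 * configure a tx queue
 */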
543 static int i40e_config_vsi_tx_queue(struct i40e_vf *vf, u16 vsi_id,
544 u16 vsi_queue_id,
545 struct virtchnl_txq_info *info)
546 {
547 struct i40e_pf *pf = vf->pf;
548 struct i40e_hw *hw = &pf->hw;
549 struct i40e_hmc_obj_txq tx_ctx;
550 struct i40e_vsi *vsi;
551 u16 pf_queue_id;
552 u32 qtx_ctl;
553 int ret = 0;
554
555 if (!i40e_vc_isvalid_vsi_id(vf, info->vsi_id)) {
556 ret = -ENOENT;
557 goto error_context;
558 }
559 pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id, vsi_queue_id);
560 vsi = i40e_find_vsi_from_id(pf, vsi_id);
561 if (!vsi) {
562 ret = -ENOENT;
563 goto error_context;
564 }
565
566
567 memset(&tx_ctx, 0, sizeof(struct i40e_hmc_obj_txq));
568
569
570 tx_ctx.base = info->dma_ring_addr / 128;
571 tx_ctx.qlen = info->ring_len;
572 tx_ctx.rdylist = le16_to_cpu(vsi->info.qs_handle[0]);
573 tx_ctx.rdylist_act = 0;
574 tx_ctx.head_wb_ena = info->headwb_enabled;
575 tx_ctx.head_wb_addr = info->dma_headwb_addr;
576
577
578 ret = i40e_clear_lan_tx_queue_context(hw, pf_queue_id);
579 if (ret) {
580 dev_err(&pf->pdev->dev,
581 "Failed to clear VF LAN Tx queue context %d, error: %d\n",
582 pf_queue_id, ret);
583 ret = -ENOENT;
584 goto error_context;
585 }
586
587
588 ret = i40e_set_lan_tx_queue_context(hw, pf_queue_id, &tx_ctx);
589 if (ret) {
590 dev_err(&pf->pdev->dev,
591 "Failed to set VF LAN Tx queue context %d error: %d\n",
592 pf_queue_id, ret);
593 ret = -ENOENT;
594 goto error_context;
595 }
596
597
598 qtx_ctl = I40E_QTX_CTL_VF_QUEUE;
599 qtx_ctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT)
600 & I40E_QTX_CTL_PF_INDX_MASK);
601 qtx_ctl |= (((vf->vf_id + hw->func_caps.vf_base_id)
602 << I40E_QTX_CTL_VFVM_INDX_SHIFT)
603 & I40E_QTX_CTL_VFVM_INDX_MASK);
604 wr32(hw, I40E_QTX_CTL(pf_queue_id), qtx_ctl);
605 i40e_flush(hw);
606
607 error_context:
608 return ret;
609 }
610
611
612
613
614
615
616
617
618
619
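/* i40e_config_vsi_rx_queue
 * @vf: pointer to the VF info
 * @vsi_id: id of VSI as provided by the FW
 * @vsi_queue_id: VSI relative queue index
 * @info: config info from the VF
 *
 * configure an rx queue
 */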
620 static int i40e_config_vsi_rx_queue(struct i40e_vf *vf, u16 vsi_id,
621 u16 vsi_queue_id,
622 struct virtchnl_rxq_info *info)
623 {
624 struct i40e_pf *pf = vf->pf;
625 struct i40e_hw *hw = &pf->hw;
626 struct i40e_hmc_obj_rxq rx_ctx;
627 u16 pf_queue_id;
628 int ret = 0;
629
630 pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id, vsi_queue_id);
631
632
633 memset(&rx_ctx, 0, sizeof(struct i40e_hmc_obj_rxq));
634
635
636 rx_ctx.base = info->dma_ring_addr / 128;
637 rx_ctx.qlen = info->ring_len;
638
639 if (info->splithdr_enabled) {
640 rx_ctx.hsplit_0 = I40E_RX_SPLIT_L2 |
641 I40E_RX_SPLIT_IP |
642 I40E_RX_SPLIT_TCP_UDP |
643 I40E_RX_SPLIT_SCTP;
644
645 if (info->hdr_size > ((2 * 1024) - 64)) {
646 ret = -EINVAL;
647 goto error_param;
648 }
649 rx_ctx.hbuff = info->hdr_size >> I40E_RXQ_CTX_HBUFF_SHIFT;
650
651
652 rx_ctx.dtype = I40E_RX_DTYPE_HEADER_SPLIT;
653 }
654
655
656 if (info->databuffer_size > ((16 * 1024) - 128)) {
657 ret = -EINVAL;
658 goto error_param;
659 }
660 rx_ctx.dbuff = info->databuffer_size >> I40E_RXQ_CTX_DBUFF_SHIFT;
661
662
663 if (info->max_pkt_size >= (16 * 1024) || info->max_pkt_size < 64) {
664 ret = -EINVAL;
665 goto error_param;
666 }
667 rx_ctx.rxmax = info->max_pkt_size;
668
669
670 rx_ctx.dsize = 1;
671
672
673 rx_ctx.lrxqthresh = 1;
674 rx_ctx.crcstrip = 1;
675 rx_ctx.prefena = 1;
676 rx_ctx.l2tsel = 1;
677
678
679 ret = i40e_clear_lan_rx_queue_context(hw, pf_queue_id);
680 if (ret) {
681 dev_err(&pf->pdev->dev,
682 "Failed to clear VF LAN Rx queue context %d, error: %d\n",
683 pf_queue_id, ret);
684 ret = -ENOENT;
685 goto error_param;
686 }
687
688
689 ret = i40e_set_lan_rx_queue_context(hw, pf_queue_id, &rx_ctx);
690 if (ret) {
691 dev_err(&pf->pdev->dev,
692 "Failed to set VF LAN Rx queue context %d error: %d\n",
693 pf_queue_id, ret);
694 ret = -ENOENT;
695 goto error_param;
696 }
697
698 error_param:
699 return ret;
700 }
701
702
703
704
705
706
707
708
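/* i40e_alloc_vsi_res
 * @vf: pointer to the VF info
 * @idx: VSI index; used only in ADq mode, zero otherwise
 *
 * alloc the VF VSI context and resources
 */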
709 static int i40e_alloc_vsi_res(struct i40e_vf *vf, u8 idx)
710 {
711 struct i40e_mac_filter *f = NULL;
712 struct i40e_pf *pf = vf->pf;
713 struct i40e_vsi *vsi;
714 u64 max_tx_rate = 0;
715 int ret = 0;
716
717 vsi = i40e_vsi_setup(pf, I40E_VSI_SRIOV, pf->vsi[pf->lan_vsi]->seid,
718 vf->vf_id);
719
720 if (!vsi) {
721 dev_err(&pf->pdev->dev,
722 "add vsi failed for VF %d, aq_err %d\n",
723 vf->vf_id, pf->hw.aq.asq_last_status);
724 ret = -ENOENT;
725 goto error_alloc_vsi_res;
726 }
727
728 if (!idx) {
729 u64 hena = i40e_pf_get_default_rss_hena(pf);
730 u8 broadcast[ETH_ALEN];
731
732 vf->lan_vsi_idx = vsi->idx;
733 vf->lan_vsi_id = vsi->id;
734
735
736
737
738
739
740 if (vf->port_vlan_id)
741 i40e_vsi_add_pvid(vsi, vf->port_vlan_id);
742
743 spin_lock_bh(&vsi->mac_filter_hash_lock);
744 if (is_valid_ether_addr(vf->default_lan_addr.addr)) {
745 f = i40e_add_mac_filter(vsi,
746 vf->default_lan_addr.addr);
747 if (!f)
748 dev_info(&pf->pdev->dev,
749 "Could not add MAC filter %pM for VF %d\n",
750 vf->default_lan_addr.addr, vf->vf_id);
751 }
752 eth_broadcast_addr(broadcast);
753 f = i40e_add_mac_filter(vsi, broadcast);
754 if (!f)
755 dev_info(&pf->pdev->dev,
756 "Could not allocate VF broadcast filter\n");
757 spin_unlock_bh(&vsi->mac_filter_hash_lock);
758 wr32(&pf->hw, I40E_VFQF_HENA1(0, vf->vf_id), (u32)hena);
759 wr32(&pf->hw, I40E_VFQF_HENA1(1, vf->vf_id), (u32)(hena >> 32));
760
761 ret = i40e_sync_vsi_filters(vsi);
762 if (ret)
763 dev_err(&pf->pdev->dev, "Unable to program ucast filters\n");
764 }
765
766
767 if (vf->adq_enabled) {
768 vf->ch[idx].vsi_idx = vsi->idx;
769 vf->ch[idx].vsi_id = vsi->id;
770 }
771
772
773 if (vf->tx_rate) {
774 max_tx_rate = vf->tx_rate;
775 } else if (vf->ch[idx].max_tx_rate) {
776 max_tx_rate = vf->ch[idx].max_tx_rate;
777 }
778
779 if (max_tx_rate) {
780 max_tx_rate = div_u64(max_tx_rate, I40E_BW_CREDIT_DIVISOR);
781 ret = i40e_aq_config_vsi_bw_limit(&pf->hw, vsi->seid,
782 max_tx_rate, 0, NULL);
783 if (ret)
784 dev_err(&pf->pdev->dev, "Unable to set tx rate, VF %d, error code %d.\n",
785 vf->vf_id, ret);
786 }
787
788 error_alloc_vsi_res:
789 return ret;
790 }
791
792
793
794
795
796
797
798
799 static void i40e_map_pf_queues_to_vsi(struct i40e_vf *vf)
800 {
801 struct i40e_pf *pf = vf->pf;
802 struct i40e_hw *hw = &pf->hw;
803 u32 reg, num_tc = 1;
804 u16 vsi_id, qps;
805 int i, j;
806
807 if (vf->adq_enabled)
808 num_tc = vf->num_tc;
809
810 for (i = 0; i < num_tc; i++) {
811 if (vf->adq_enabled) {
812 qps = vf->ch[i].num_qps;
813 vsi_id = vf->ch[i].vsi_id;
814 } else {
815 qps = pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs;
816 vsi_id = vf->lan_vsi_id;
817 }
818
819 for (j = 0; j < 7; j++) {
820 if (j * 2 >= qps) {
821
822 reg = 0x07FF07FF;
823 } else {
824 u16 qid = i40e_vc_get_pf_queue_id(vf,
825 vsi_id,
826 j * 2);
827 reg = qid;
828 qid = i40e_vc_get_pf_queue_id(vf, vsi_id,
829 (j * 2) + 1);
830 reg |= qid << 16;
831 }
832 i40e_write_rx_ctl(hw,
833 I40E_VSILAN_QTABLE(j, vsi_id),
834 reg);
835 }
836 }
837 }
838
839
840
841
842
843
844
845
846 static void i40e_map_pf_to_vf_queues(struct i40e_vf *vf)
847 {
848 struct i40e_pf *pf = vf->pf;
849 struct i40e_hw *hw = &pf->hw;
850 u32 reg, total_qps = 0;
851 u32 qps, num_tc = 1;
852 u16 vsi_id, qid;
853 int i, j;
854
855 if (vf->adq_enabled)
856 num_tc = vf->num_tc;
857
858 for (i = 0; i < num_tc; i++) {
859 if (vf->adq_enabled) {
860 qps = vf->ch[i].num_qps;
861 vsi_id = vf->ch[i].vsi_id;
862 } else {
863 qps = pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs;
864 vsi_id = vf->lan_vsi_id;
865 }
866
867 for (j = 0; j < qps; j++) {
868 qid = i40e_vc_get_pf_queue_id(vf, vsi_id, j);
869
870 reg = (qid & I40E_VPLAN_QTABLE_QINDEX_MASK);
871 wr32(hw, I40E_VPLAN_QTABLE(total_qps, vf->vf_id),
872 reg);
873 total_qps++;
874 }
875 }
876 }
877
878
879
880
881
882
883
884 static void i40e_enable_vf_mappings(struct i40e_vf *vf)
885 {
886 struct i40e_pf *pf = vf->pf;
887 struct i40e_hw *hw = &pf->hw;
888 u32 reg;
889
890
891
892
893
894 i40e_write_rx_ctl(hw, I40E_VSILAN_QBASE(vf->lan_vsi_id),
895 I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK);
896
897
898 reg = I40E_VPLAN_MAPENA_TXRX_ENA_MASK;
899 wr32(hw, I40E_VPLAN_MAPENA(vf->vf_id), reg);
900
901 i40e_map_pf_to_vf_queues(vf);
902 i40e_map_pf_queues_to_vsi(vf);
903
904 i40e_flush(hw);
905 }
906
907
908
909
910
911
912
913 static void i40e_disable_vf_mappings(struct i40e_vf *vf)
914 {
915 struct i40e_pf *pf = vf->pf;
916 struct i40e_hw *hw = &pf->hw;
917 int i;
918
919
920 wr32(hw, I40E_VPLAN_MAPENA(vf->vf_id), 0);
921 for (i = 0; i < I40E_MAX_VSI_QP; i++)
922 wr32(hw, I40E_VPLAN_QTABLE(i, vf->vf_id),
923 I40E_QUEUE_END_OF_LIST);
924 i40e_flush(hw);
925 }
926
927
928
929
930
931
932
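/* i40e_free_vf_res
 * @vf: pointer to the VF info
 *
 * free VF resources
 */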
933 static void i40e_free_vf_res(struct i40e_vf *vf)
934 {
935 struct i40e_pf *pf = vf->pf;
936 struct i40e_hw *hw = &pf->hw;
937 u32 reg_idx, reg;
938 int i, j, msix_vf;
939
940
941
942
943 clear_bit(I40E_VF_STATE_INIT, &vf->vf_states);
944
945
946
947
948 if (vf->num_queue_pairs > I40E_DEFAULT_QUEUES_PER_VF) {
949 pf->queues_left += vf->num_queue_pairs -
950 I40E_DEFAULT_QUEUES_PER_VF;
951 }
952
953
954 if (vf->lan_vsi_idx) {
955 i40e_vsi_release(pf->vsi[vf->lan_vsi_idx]);
956 vf->lan_vsi_idx = 0;
957 vf->lan_vsi_id = 0;
958 vf->num_mac = 0;
959 }
960
961
962 if (vf->adq_enabled && vf->ch[0].vsi_idx) {
963 for (j = 0; j < vf->num_tc; j++) {
964
965
966
967
968 if (j)
969 i40e_vsi_release(pf->vsi[vf->ch[j].vsi_idx]);
970 vf->ch[j].vsi_idx = 0;
971 vf->ch[j].vsi_id = 0;
972 }
973 }
974 msix_vf = pf->hw.func_caps.num_msix_vectors_vf;
975
976
977 for (i = 0; i < msix_vf; i++) {
978
979 if (0 == i)
980 reg_idx = I40E_VFINT_DYN_CTL0(vf->vf_id);
981 else
982 reg_idx = I40E_VFINT_DYN_CTLN(((msix_vf - 1) *
983 (vf->vf_id))
984 + (i - 1));
985 wr32(hw, reg_idx, I40E_VFINT_DYN_CTLN_CLEARPBA_MASK);
986 i40e_flush(hw);
987 }
988
989
990 for (i = 0; i < msix_vf; i++) {
991
992 if (0 == i)
993 reg_idx = I40E_VPINT_LNKLST0(vf->vf_id);
994 else
995 reg_idx = I40E_VPINT_LNKLSTN(((msix_vf - 1) *
996 (vf->vf_id))
997 + (i - 1));
998 reg = (I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK |
999 I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK);
1000 wr32(hw, reg_idx, reg);
1001 i40e_flush(hw);
1002 }
1003
1004 vf->num_queue_pairs = 0;
1005 clear_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states);
1006 clear_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states);
1007 }
1008
1009
1010
1011
1012
1013
1014
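/* i40e_alloc_vf_res
 * @vf: pointer to the VF info
 *
 * allocate VF resources
 */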
1015 static int i40e_alloc_vf_res(struct i40e_vf *vf)
1016 {
1017 struct i40e_pf *pf = vf->pf;
1018 int total_queue_pairs = 0;
1019 int ret, idx;
1020
1021 if (vf->num_req_queues &&
1022 vf->num_req_queues <= pf->queues_left + I40E_DEFAULT_QUEUES_PER_VF)
1023 pf->num_vf_qps = vf->num_req_queues;
1024 else
1025 pf->num_vf_qps = I40E_DEFAULT_QUEUES_PER_VF;
1026
1027
1028 ret = i40e_alloc_vsi_res(vf, 0);
1029 if (ret)
1030 goto error_alloc;
1031 total_queue_pairs += pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs;
1032
1033
1034 if (vf->adq_enabled) {
1035 if (pf->queues_left >=
1036 (I40E_MAX_VF_QUEUES - I40E_DEFAULT_QUEUES_PER_VF)) {
1037
1038 for (idx = 1; idx < vf->num_tc; idx++) {
1039 ret = i40e_alloc_vsi_res(vf, idx);
1040 if (ret)
1041 goto error_alloc;
1042 }
1043
1044 total_queue_pairs = I40E_MAX_VF_QUEUES;
1045 } else {
1046 dev_info(&pf->pdev->dev, "VF %d: Not enough queues to allocate, disabling ADq\n",
1047 vf->vf_id);
1048 vf->adq_enabled = false;
1049 }
1050 }
1051
1052
1053
1054
1055
1056
1057 if (total_queue_pairs > I40E_DEFAULT_QUEUES_PER_VF)
1058 pf->queues_left -=
1059 total_queue_pairs - I40E_DEFAULT_QUEUES_PER_VF;
1060
1061 if (vf->trusted)
1062 set_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
1063 else
1064 clear_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
1065
1066
1067
1068
1069 vf->num_queue_pairs = total_queue_pairs;
1070
1071
1072 set_bit(I40E_VF_STATE_INIT, &vf->vf_states);
1073
1074 error_alloc:
1075 if (ret)
1076 i40e_free_vf_res(vf);
1077
1078 return ret;
1079 }
1080
1081 #define VF_DEVICE_STATUS 0xAA
1082 #define VF_TRANS_PENDING_MASK 0x20
1083
1084
1085
1086
1087
1088
1089
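/* i40e_quiesce_vf_pci
 * @vf: pointer to the VF structure
 *
 * Poll until pending PCI transactions for this VF have cleared.
 * Returns 0 on success, or -EIO if they never clear.
 */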
1090 static int i40e_quiesce_vf_pci(struct i40e_vf *vf)
1091 {
1092 struct i40e_pf *pf = vf->pf;
1093 struct i40e_hw *hw = &pf->hw;
1094 int vf_abs_id, i;
1095 u32 reg;
1096
1097 vf_abs_id = vf->vf_id + hw->func_caps.vf_base_id;
1098
1099 wr32(hw, I40E_PF_PCI_CIAA,
1100 VF_DEVICE_STATUS | (vf_abs_id << I40E_PF_PCI_CIAA_VF_NUM_SHIFT));
1101 for (i = 0; i < 100; i++) {
1102 reg = rd32(hw, I40E_PF_PCI_CIAD);
1103 if ((reg & VF_TRANS_PENDING_MASK) == 0)
1104 return 0;
1105 udelay(1);
1106 }
1107 return -EIO;
1108 }
1109
1110 static inline int i40e_getnum_vf_vsi_vlan_filters(struct i40e_vsi *vsi);
1111
1112
1113
1114
1115
1116
1117
1118
1119
1120
1121
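/* i40e_config_vf_promiscuous_mode
 * @vf: pointer to the VF info
 * @vsi_id: VSI id
 * @allmulti: set MAC L2 layer multicast promiscuous enable/disable
 * @alluni: set MAC L2 layer unicast promiscuous enable/disable
 *
 * Called from the VF message handler to configure promiscuous mode on the
 * VF VSIs, and from the VF reset path to clear it.
 */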
1122 static i40e_status i40e_config_vf_promiscuous_mode(struct i40e_vf *vf,
1123 u16 vsi_id,
1124 bool allmulti,
1125 bool alluni)
1126 {
1127 struct i40e_pf *pf = vf->pf;
1128 struct i40e_hw *hw = &pf->hw;
1129 struct i40e_mac_filter *f;
1130 i40e_status aq_ret = 0;
1131 struct i40e_vsi *vsi;
1132 int bkt;
1133
1134 vsi = i40e_find_vsi_from_id(pf, vsi_id);
1135 if (!i40e_vc_isvalid_vsi_id(vf, vsi_id) || !vsi)
1136 return I40E_ERR_PARAM;
1137
1138 if (vf->port_vlan_id) {
1139 aq_ret = i40e_aq_set_vsi_mc_promisc_on_vlan(hw, vsi->seid,
1140 allmulti,
1141 vf->port_vlan_id,
1142 NULL);
1143 if (aq_ret) {
1144 int aq_err = pf->hw.aq.asq_last_status;
1145
1146 dev_err(&pf->pdev->dev,
1147 "VF %d failed to set multicast promiscuous mode err %s aq_err %s\n",
1148 vf->vf_id,
1149 i40e_stat_str(&pf->hw, aq_ret),
1150 i40e_aq_str(&pf->hw, aq_err));
1151 return aq_ret;
1152 }
1153
1154 aq_ret = i40e_aq_set_vsi_uc_promisc_on_vlan(hw, vsi->seid,
1155 alluni,
1156 vf->port_vlan_id,
1157 NULL);
1158 if (aq_ret) {
1159 int aq_err = pf->hw.aq.asq_last_status;
1160
1161 dev_err(&pf->pdev->dev,
1162 "VF %d failed to set unicast promiscuous mode err %s aq_err %s\n",
1163 vf->vf_id,
1164 i40e_stat_str(&pf->hw, aq_ret),
1165 i40e_aq_str(&pf->hw, aq_err));
1166 }
1167 return aq_ret;
1168 } else if (i40e_getnum_vf_vsi_vlan_filters(vsi)) {
1169 hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
1170 if (f->vlan < 0 || f->vlan > I40E_MAX_VLANID)
1171 continue;
1172 aq_ret = i40e_aq_set_vsi_mc_promisc_on_vlan(hw,
1173 vsi->seid,
1174 allmulti,
1175 f->vlan,
1176 NULL);
1177 if (aq_ret) {
1178 int aq_err = pf->hw.aq.asq_last_status;
1179
1180 dev_err(&pf->pdev->dev,
1181 "Could not add VLAN %d to multicast promiscuous domain err %s aq_err %s\n",
1182 f->vlan,
1183 i40e_stat_str(&pf->hw, aq_ret),
1184 i40e_aq_str(&pf->hw, aq_err));
1185 }
1186
1187 aq_ret = i40e_aq_set_vsi_uc_promisc_on_vlan(hw,
1188 vsi->seid,
1189 alluni,
1190 f->vlan,
1191 NULL);
1192 if (aq_ret) {
1193 int aq_err = pf->hw.aq.asq_last_status;
1194
1195 dev_err(&pf->pdev->dev,
1196 "Could not add VLAN %d to Unicast promiscuous domain err %s aq_err %s\n",
1197 f->vlan,
1198 i40e_stat_str(&pf->hw, aq_ret),
1199 i40e_aq_str(&pf->hw, aq_err));
1200 }
1201 }
1202 return aq_ret;
1203 }
1204 aq_ret = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid, allmulti,
1205 NULL);
1206 if (aq_ret) {
1207 int aq_err = pf->hw.aq.asq_last_status;
1208
1209 dev_err(&pf->pdev->dev,
1210 "VF %d failed to set multicast promiscuous mode err %s aq_err %s\n",
1211 vf->vf_id,
1212 i40e_stat_str(&pf->hw, aq_ret),
1213 i40e_aq_str(&pf->hw, aq_err));
1214 return aq_ret;
1215 }
1216
1217 aq_ret = i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid, alluni,
1218 NULL, true);
1219 if (aq_ret) {
1220 int aq_err = pf->hw.aq.asq_last_status;
1221
1222 dev_err(&pf->pdev->dev,
1223 "VF %d failed to set unicast promiscuous mode err %s aq_err %s\n",
1224 vf->vf_id,
1225 i40e_stat_str(&pf->hw, aq_ret),
1226 i40e_aq_str(&pf->hw, aq_err));
1227 }
1228
1229 return aq_ret;
1230 }
1231
1232
1233
1234
1235
1236
1237
1238
1239
1240
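/* i40e_trigger_vf_reset
 * @vf: pointer to the VF structure
 * @flr: VFLR was issued or not
 *
 * Trigger hardware to start a reset for a particular VF.  Expects the caller
 * to wait the proper amount of time to allow hardware to reset the VF before
 * it cleans up and restores VF functionality.
 */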
1241 static void i40e_trigger_vf_reset(struct i40e_vf *vf, bool flr)
1242 {
1243 struct i40e_pf *pf = vf->pf;
1244 struct i40e_hw *hw = &pf->hw;
1245 u32 reg, reg_idx, bit_idx;
1246
1247
1248 clear_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states);
1249
1250
1251
1252
1253
1254
1255
1256 clear_bit(I40E_VF_STATE_INIT, &vf->vf_states);
1257
1258
1259
1260
1261 if (!flr) {
1262
1263 reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id));
1264 reg |= I40E_VPGEN_VFRTRIG_VFSWR_MASK;
1265 wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);
1266 i40e_flush(hw);
1267 }
1268
1269 reg_idx = (hw->func_caps.vf_base_id + vf->vf_id) / 32;
1270 bit_idx = (hw->func_caps.vf_base_id + vf->vf_id) % 32;
1271 wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
1272 i40e_flush(hw);
1273
1274 if (i40e_quiesce_vf_pci(vf))
1275 dev_err(&pf->pdev->dev, "VF %d PCI transactions stuck\n",
1276 vf->vf_id);
1277 }
1278
1279
1280
1281
1282
1283
1284
1285
1286
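/* i40e_cleanup_reset_vf
 * @vf: pointer to the VF structure
 *
 * Cleanup a VF after the hardware reset is finished.  Expects the caller to
 * have verified that the reset completed and that the minimum wait time has
 * passed.
 */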
1287 static void i40e_cleanup_reset_vf(struct i40e_vf *vf)
1288 {
1289 struct i40e_pf *pf = vf->pf;
1290 struct i40e_hw *hw = &pf->hw;
1291 u32 reg;
1292
1293
1294 i40e_config_vf_promiscuous_mode(vf, vf->lan_vsi_id, false, false);
1295
1296
1297 i40e_free_vf_res(vf);
1298
1299
1300
1301
1302
1303
1304
1305
1306
1307
1308
1309 reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id));
1310 reg &= ~I40E_VPGEN_VFRTRIG_VFSWR_MASK;
1311 wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);
1312
1313
1314 if (!i40e_alloc_vf_res(vf)) {
1315 int abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
1316 i40e_enable_vf_mappings(vf);
1317 set_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states);
1318 clear_bit(I40E_VF_STATE_DISABLED, &vf->vf_states);
1319
1320 if (!test_and_clear_bit(I40E_VF_STATE_PRE_ENABLE,
1321 &vf->vf_states))
1322 i40e_notify_client_of_vf_reset(pf, abs_vf_id);
1323 vf->num_vlan = 0;
1324 }
1325
1326
1327
1328
1329
1330 wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), VIRTCHNL_VFR_VFACTIVE);
1331 }
1332
1333
1334
1335
1336
1337
1338
1339
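/* i40e_reset_vf
 * @vf: pointer to the VF structure
 * @flr: VFLR was issued or not
 *
 * Reset a single VF.  Returns false if another VF disable/reset operation is
 * already in progress (__I40E_VF_DISABLE is set), true once the reset has
 * been processed.
 */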
1340 bool i40e_reset_vf(struct i40e_vf *vf, bool flr)
1341 {
1342 struct i40e_pf *pf = vf->pf;
1343 struct i40e_hw *hw = &pf->hw;
1344 bool rsd = false;
1345 u32 reg;
1346 int i;
1347
1348
1349
1350
1351 if (test_and_set_bit(__I40E_VF_DISABLE, pf->state))
1352 return false;
1353
1354 i40e_trigger_vf_reset(vf, flr);
1355
1356
1357
1358
1359 for (i = 0; i < 10; i++) {
1360
1361
1362
1363
1364
1365 usleep_range(10000, 20000);
1366 reg = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_id));
1367 if (reg & I40E_VPGEN_VFRSTAT_VFRD_MASK) {
1368 rsd = true;
1369 break;
1370 }
1371 }
1372
1373 if (flr)
1374 usleep_range(10000, 20000);
1375
1376 if (!rsd)
1377 dev_err(&pf->pdev->dev, "VF reset check timeout on VF %d\n",
1378 vf->vf_id);
1379 usleep_range(10000, 20000);
1380
1381
1382 if (vf->lan_vsi_idx != 0)
1383 i40e_vsi_stop_rings(pf->vsi[vf->lan_vsi_idx]);
1384
1385 i40e_cleanup_reset_vf(vf);
1386
1387 i40e_flush(hw);
1388 clear_bit(__I40E_VF_DISABLE, pf->state);
1389
1390 return true;
1391 }
1392
1393
1394
1395
1396
1397
1398
1399
1400
1401
1402
1403
1404
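/* i40e_reset_all_vfs
 * @pf: pointer to the PF structure
 * @flr: VFLR was issued or not
 *
 * Reset all allocated VFs in one go.  First, tell the hardware to reset each
 * VF, then do all the waiting in one chunk, and finally finish restoring each
 * VF after the wait.  This is useful during PF routines which need to reset
 * all VFs, as otherwise it must perform these resets in a serialized fashion.
 *
 * Returns true if any VFs were reset, and false otherwise.
 */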
1405 bool i40e_reset_all_vfs(struct i40e_pf *pf, bool flr)
1406 {
1407 struct i40e_hw *hw = &pf->hw;
1408 struct i40e_vf *vf;
1409 int i, v;
1410 u32 reg;
1411
1412
1413 if (!pf->num_alloc_vfs)
1414 return false;
1415
1416
1417 if (test_and_set_bit(__I40E_VF_DISABLE, pf->state))
1418 return false;
1419
1420
1421 for (v = 0; v < pf->num_alloc_vfs; v++)
1422 i40e_trigger_vf_reset(&pf->vf[v], flr);
1423
1424
1425
1426
1427
1428
1429
1430 for (i = 0, v = 0; i < 10 && v < pf->num_alloc_vfs; i++) {
1431 usleep_range(10000, 20000);
1432
1433
1434
1435
1436 while (v < pf->num_alloc_vfs) {
1437 vf = &pf->vf[v];
1438 reg = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_id));
1439 if (!(reg & I40E_VPGEN_VFRSTAT_VFRD_MASK))
1440 break;
1441
1442
1443
1444
1445 v++;
1446 }
1447 }
1448
1449 if (flr)
1450 usleep_range(10000, 20000);
1451
1452
1453
1454
1455 if (v < pf->num_alloc_vfs)
1456 dev_err(&pf->pdev->dev, "VF reset check timeout on VF %d\n",
1457 pf->vf[v].vf_id);
1458 usleep_range(10000, 20000);
1459
1460
1461
1462
1463 for (v = 0; v < pf->num_alloc_vfs; v++) {
1464
1465 if (pf->vf[v].lan_vsi_idx == 0)
1466 continue;
1467
1468 i40e_vsi_stop_rings_no_wait(pf->vsi[pf->vf[v].lan_vsi_idx]);
1469 }
1470
1471
1472
1473
1474 for (v = 0; v < pf->num_alloc_vfs; v++) {
1475
1476 if (pf->vf[v].lan_vsi_idx == 0)
1477 continue;
1478
1479 i40e_vsi_wait_queues_disabled(pf->vsi[pf->vf[v].lan_vsi_idx]);
1480 }
1481
1482
1483
1484
1485 mdelay(50);
1486
1487
1488 for (v = 0; v < pf->num_alloc_vfs; v++)
1489 i40e_cleanup_reset_vf(&pf->vf[v]);
1490
1491 i40e_flush(hw);
1492 clear_bit(__I40E_VF_DISABLE, pf->state);
1493
1494 return true;
1495 }
1496
1497
1498
1499
1500
1501
1502
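/* i40e_free_vfs
 * @pf: pointer to the PF structure
 *
 * free VF resources and disable SR-IOV if no VFs remain assigned
 */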
1503 void i40e_free_vfs(struct i40e_pf *pf)
1504 {
1505 struct i40e_hw *hw = &pf->hw;
1506 u32 reg_idx, bit_idx;
1507 int i, tmp, vf_id;
1508
1509 if (!pf->vf)
1510 return;
1511 while (test_and_set_bit(__I40E_VF_DISABLE, pf->state))
1512 usleep_range(1000, 2000);
1513
1514 i40e_notify_client_of_vf_enable(pf, 0);
1515
1516
1517 for (i = 0; i < pf->num_alloc_vfs; i++) {
1518 if (test_bit(I40E_VF_STATE_INIT, &pf->vf[i].vf_states))
1519 continue;
1520
1521 i40e_vsi_stop_rings_no_wait(pf->vsi[pf->vf[i].lan_vsi_idx]);
1522 }
1523
1524 for (i = 0; i < pf->num_alloc_vfs; i++) {
1525 if (test_bit(I40E_VF_STATE_INIT, &pf->vf[i].vf_states))
1526 continue;
1527
1528 i40e_vsi_wait_queues_disabled(pf->vsi[pf->vf[i].lan_vsi_idx]);
1529 }
1530
1531
1532
1533
1534
1535 if (!pci_vfs_assigned(pf->pdev))
1536 pci_disable_sriov(pf->pdev);
1537 else
1538 dev_warn(&pf->pdev->dev, "VFs are assigned - not disabling SR-IOV\n");
1539
1540
1541 tmp = pf->num_alloc_vfs;
1542 pf->num_alloc_vfs = 0;
1543 for (i = 0; i < tmp; i++) {
1544 if (test_bit(I40E_VF_STATE_INIT, &pf->vf[i].vf_states))
1545 i40e_free_vf_res(&pf->vf[i]);
1546
1547 i40e_disable_vf_mappings(&pf->vf[i]);
1548 }
1549
1550 kfree(pf->vf);
1551 pf->vf = NULL;
1552
1553
1554
1555
1556
1557 if (!pci_vfs_assigned(pf->pdev)) {
1558
1559
1560
1561 for (vf_id = 0; vf_id < tmp; vf_id++) {
1562 reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
1563 bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
1564 wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
1565 }
1566 }
1567 clear_bit(__I40E_VF_DISABLE, pf->state);
1568 }
1569
1570 #ifdef CONFIG_PCI_IOV
1571
1572
1573
1574
1575
1576
1577
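/* i40e_alloc_vfs
 * @pf: pointer to the PF structure
 * @num_alloc_vfs: number of VFs to allocate
 *
 * allocate VF resources
 */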
1578 int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs)
1579 {
1580 struct i40e_vf *vfs;
1581 int i, ret = 0;
1582
1583
1584 i40e_irq_dynamic_disable_icr0(pf);
1585
1586
1587 if (pci_num_vf(pf->pdev) != num_alloc_vfs) {
1588 ret = pci_enable_sriov(pf->pdev, num_alloc_vfs);
1589 if (ret) {
1590 pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
1591 pf->num_alloc_vfs = 0;
1592 goto err_iov;
1593 }
1594 }
1595
1596 vfs = kcalloc(num_alloc_vfs, sizeof(struct i40e_vf), GFP_KERNEL);
1597 if (!vfs) {
1598 ret = -ENOMEM;
1599 goto err_alloc;
1600 }
1601 pf->vf = vfs;
1602
1603
1604 for (i = 0; i < num_alloc_vfs; i++) {
1605 vfs[i].pf = pf;
1606 vfs[i].parent_type = I40E_SWITCH_ELEMENT_TYPE_VEB;
1607 vfs[i].vf_id = i;
1608
1609
1610 set_bit(I40E_VIRTCHNL_VF_CAP_L2, &vfs[i].vf_caps);
1611 vfs[i].spoofchk = true;
1612
1613 set_bit(I40E_VF_STATE_PRE_ENABLE, &vfs[i].vf_states);
1614
1615 }
1616 pf->num_alloc_vfs = num_alloc_vfs;
1617
1618
1619 i40e_reset_all_vfs(pf, false);
1620
1621 i40e_notify_client_of_vf_enable(pf, num_alloc_vfs);
1622
1623 err_alloc:
1624 if (ret)
1625 i40e_free_vfs(pf);
1626 err_iov:
1627
1628 i40e_irq_dynamic_enable_icr0(pf);
1629 return ret;
1630 }
1631
1632 #endif
1633
1634
1635
1636
1637
1638
1639
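/* i40e_pci_sriov_enable
 * @pdev: pointer to a pci_dev structure
 * @num_vfs: number of VFs to allocate
 *
 * Enable or change the number of VFs
 */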
1640 static int i40e_pci_sriov_enable(struct pci_dev *pdev, int num_vfs)
1641 {
1642 #ifdef CONFIG_PCI_IOV
1643 struct i40e_pf *pf = pci_get_drvdata(pdev);
1644 int pre_existing_vfs = pci_num_vf(pdev);
1645 int err = 0;
1646
1647 if (test_bit(__I40E_TESTING, pf->state)) {
1648 dev_warn(&pdev->dev,
1649 "Cannot enable SR-IOV virtual functions while the device is undergoing diagnostic testing\n");
1650 err = -EPERM;
1651 goto err_out;
1652 }
1653
1654 if (pre_existing_vfs && pre_existing_vfs != num_vfs)
1655 i40e_free_vfs(pf);
1656 else if (pre_existing_vfs && pre_existing_vfs == num_vfs)
1657 goto out;
1658
1659 if (num_vfs > pf->num_req_vfs) {
1660 dev_warn(&pdev->dev, "Unable to enable %d VFs. Limited to %d VFs due to device resource constraints.\n",
1661 num_vfs, pf->num_req_vfs);
1662 err = -EPERM;
1663 goto err_out;
1664 }
1665
1666 dev_info(&pdev->dev, "Allocating %d VFs.\n", num_vfs);
1667 err = i40e_alloc_vfs(pf, num_vfs);
1668 if (err) {
1669 dev_warn(&pdev->dev, "Failed to enable SR-IOV: %d\n", err);
1670 goto err_out;
1671 }
1672
1673 out:
1674 return num_vfs;
1675
1676 err_out:
1677 return err;
1678 #endif
1679 return 0;
1680 }
1681
1682
1683
1684
1685
1686
1687
1688
1689
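/* i40e_pci_sriov_configure
 * @pdev: pointer to a pci_dev structure
 * @num_vfs: number of VFs to allocate
 *
 * Enable or change the number of VFs.  This is the driver's sriov_configure
 * callback, typically reached when the administrator writes a VF count to
 * the device's sriov_numvfs sysfs attribute; a count of 0 frees all VFs.
 */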
1690 int i40e_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
1691 {
1692 struct i40e_pf *pf = pci_get_drvdata(pdev);
1693 int ret = 0;
1694
1695 if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
1696 dev_warn(&pdev->dev, "Unable to configure VFs, other operation is pending.\n");
1697 return -EAGAIN;
1698 }
1699
1700 if (num_vfs) {
1701 if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
1702 pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
1703 i40e_do_reset_safe(pf, I40E_PF_RESET_FLAG);
1704 }
1705 ret = i40e_pci_sriov_enable(pdev, num_vfs);
1706 goto sriov_configure_out;
1707 }
1708
1709 if (!pci_vfs_assigned(pf->pdev)) {
1710 i40e_free_vfs(pf);
1711 pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
1712 i40e_do_reset_safe(pf, I40E_PF_RESET_FLAG);
1713 } else {
1714 dev_warn(&pdev->dev, "Unable to free VFs because some are assigned to VMs.\n");
1715 ret = -EINVAL;
1716 goto sriov_configure_out;
1717 }
1718 sriov_configure_out:
1719 clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
1720 return ret;
1721 }
1722
1723
1724
1725
1726
1727
1728
1729
1730
1731
1732
1733
1734
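/* i40e_vc_send_msg_to_vf
 * @vf: pointer to the VF info
 * @v_opcode: virtual channel opcode
 * @v_retval: virtual channel return value
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * send msg to VF
 */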
1735 static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode,
1736 u32 v_retval, u8 *msg, u16 msglen)
1737 {
1738 struct i40e_pf *pf;
1739 struct i40e_hw *hw;
1740 int abs_vf_id;
1741 i40e_status aq_ret;
1742
1743
1744 if (!vf || vf->vf_id >= vf->pf->num_alloc_vfs)
1745 return -EINVAL;
1746
1747 pf = vf->pf;
1748 hw = &pf->hw;
1749 abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
1750
1751
1752 if (v_retval) {
1753 vf->num_invalid_msgs++;
1754 dev_info(&pf->pdev->dev, "VF %d failed opcode %d, retval: %d\n",
1755 vf->vf_id, v_opcode, v_retval);
1756 if (vf->num_invalid_msgs >
1757 I40E_DEFAULT_NUM_INVALID_MSGS_ALLOWED) {
1758 dev_err(&pf->pdev->dev,
1759 "Number of invalid messages exceeded for VF %d\n",
1760 vf->vf_id);
1761 dev_err(&pf->pdev->dev, "Use PF Control I/F to enable the VF\n");
1762 set_bit(I40E_VF_STATE_DISABLED, &vf->vf_states);
1763 }
1764 } else {
1765 vf->num_valid_msgs++;
1766
1767 vf->num_invalid_msgs = 0;
1768 }
1769
1770 aq_ret = i40e_aq_send_msg_to_vf(hw, abs_vf_id, v_opcode, v_retval,
1771 msg, msglen, NULL);
1772 if (aq_ret) {
1773 dev_info(&pf->pdev->dev,
1774 "Unable to send the message to VF %d aq_err %d\n",
1775 vf->vf_id, pf->hw.aq.asq_last_status);
1776 return -EIO;
1777 }
1778
1779 return 0;
1780 }
1781
1782
1783
1784
1785
1786
1787
1788
1789
1790 static int i40e_vc_send_resp_to_vf(struct i40e_vf *vf,
1791 enum virtchnl_ops opcode,
1792 i40e_status retval)
1793 {
1794 return i40e_vc_send_msg_to_vf(vf, opcode, retval, NULL, 0);
1795 }
1796
1797
1798
1799
1800
1801
1802
1803
1804 static int i40e_vc_get_version_msg(struct i40e_vf *vf, u8 *msg)
1805 {
1806 struct virtchnl_version_info info = {
1807 VIRTCHNL_VERSION_MAJOR, VIRTCHNL_VERSION_MINOR
1808 };
1809
1810 vf->vf_ver = *(struct virtchnl_version_info *)msg;
1811
1812 if (VF_IS_V10(&vf->vf_ver))
1813 info.minor = VIRTCHNL_VERSION_MINOR_NO_VF_CAPS;
1814 return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_VERSION,
1815 I40E_SUCCESS, (u8 *)&info,
1816 sizeof(struct virtchnl_version_info));
1817 }
1818
1819
1820
1821
1822
1823 static void i40e_del_qch(struct i40e_vf *vf)
1824 {
1825 struct i40e_pf *pf = vf->pf;
1826 int i;
1827
1828
1829
1830
1831 for (i = 1; i < vf->num_tc; i++) {
1832 if (vf->ch[i].vsi_idx) {
1833 i40e_vsi_release(pf->vsi[vf->ch[i].vsi_idx]);
1834 vf->ch[i].vsi_idx = 0;
1835 vf->ch[i].vsi_id = 0;
1836 }
1837 }
1838 }
1839
1840
1841
1842
1843
1844
1845
1846
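/* i40e_vc_get_vf_resources_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * called from the VF to request its resources
 */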
1847 static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg)
1848 {
1849 struct virtchnl_vf_resource *vfres = NULL;
1850 struct i40e_pf *pf = vf->pf;
1851 i40e_status aq_ret = 0;
1852 struct i40e_vsi *vsi;
1853 int num_vsis = 1;
1854 size_t len = 0;
1855 int ret;
1856
1857 if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
1858 aq_ret = I40E_ERR_PARAM;
1859 goto err;
1860 }
1861
1862 len = struct_size(vfres, vsi_res, num_vsis);
1863 vfres = kzalloc(len, GFP_KERNEL);
1864 if (!vfres) {
1865 aq_ret = I40E_ERR_NO_MEMORY;
1866 len = 0;
1867 goto err;
1868 }
1869 if (VF_IS_V11(&vf->vf_ver))
1870 vf->driver_caps = *(u32 *)msg;
1871 else
1872 vf->driver_caps = VIRTCHNL_VF_OFFLOAD_L2 |
1873 VIRTCHNL_VF_OFFLOAD_RSS_REG |
1874 VIRTCHNL_VF_OFFLOAD_VLAN;
1875
1876 vfres->vf_cap_flags = VIRTCHNL_VF_OFFLOAD_L2;
1877 vsi = pf->vsi[vf->lan_vsi_idx];
1878 if (!vsi->info.pvid)
1879 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_VLAN;
1880
1881 if (i40e_vf_client_capable(pf, vf->vf_id) &&
1882 (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_IWARP)) {
1883 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_IWARP;
1884 set_bit(I40E_VF_STATE_IWARPENA, &vf->vf_states);
1885 } else {
1886 clear_bit(I40E_VF_STATE_IWARPENA, &vf->vf_states);
1887 }
1888
1889 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
1890 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_PF;
1891 } else {
1892 if ((pf->hw_features & I40E_HW_RSS_AQ_CAPABLE) &&
1893 (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_AQ))
1894 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_AQ;
1895 else
1896 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_REG;
1897 }
1898
1899 if (pf->hw_features & I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE) {
1900 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
1901 vfres->vf_cap_flags |=
1902 VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2;
1903 }
1904
1905 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP)
1906 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP;
1907
1908 if ((pf->hw_features & I40E_HW_OUTER_UDP_CSUM_CAPABLE) &&
1909 (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM))
1910 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM;
1911
1912 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RX_POLLING) {
1913 if (pf->flags & I40E_FLAG_MFP_ENABLED) {
1914 dev_err(&pf->pdev->dev,
1915 "VF %d requested polling mode: this feature is supported only when the device is running in single function per port (SFP) mode\n",
1916 vf->vf_id);
1917 aq_ret = I40E_ERR_PARAM;
1918 goto err;
1919 }
1920 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RX_POLLING;
1921 }
1922
1923 if (pf->hw_features & I40E_HW_WB_ON_ITR_CAPABLE) {
1924 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR)
1925 vfres->vf_cap_flags |=
1926 VIRTCHNL_VF_OFFLOAD_WB_ON_ITR;
1927 }
1928
1929 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_REQ_QUEUES)
1930 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_REQ_QUEUES;
1931
1932 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ADQ)
1933 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ADQ;
1934
1935 vfres->num_vsis = num_vsis;
1936 vfres->num_queue_pairs = vf->num_queue_pairs;
1937 vfres->max_vectors = pf->hw.func_caps.num_msix_vectors_vf;
1938 vfres->rss_key_size = I40E_HKEY_ARRAY_SIZE;
1939 vfres->rss_lut_size = I40E_VF_HLUT_ARRAY_SIZE;
1940
1941 if (vf->lan_vsi_idx) {
1942 vfres->vsi_res[0].vsi_id = vf->lan_vsi_id;
1943 vfres->vsi_res[0].vsi_type = VIRTCHNL_VSI_SRIOV;
1944 vfres->vsi_res[0].num_queue_pairs = vsi->alloc_queue_pairs;
1945
1946 vfres->vsi_res[0].qset_handle
1947 = le16_to_cpu(vsi->info.qs_handle[0]);
1948 ether_addr_copy(vfres->vsi_res[0].default_mac_addr,
1949 vf->default_lan_addr.addr);
1950 }
1951 set_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states);
1952
1953 err:
1954
1955 ret = i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_VF_RESOURCES,
1956 aq_ret, (u8 *)vfres, len);
1957
1958 kfree(vfres);
1959 return ret;
1960 }
1961
1962
1963
1964
1965
1966
1967
1968
1969
1970 static void i40e_vc_reset_vf_msg(struct i40e_vf *vf)
1971 {
1972 if (test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states))
1973 i40e_reset_vf(vf, false);
1974 }
1975
1976
1977
1978
1979
1980
1981
1982 static inline int i40e_getnum_vf_vsi_vlan_filters(struct i40e_vsi *vsi)
1983 {
1984 struct i40e_mac_filter *f;
1985 int num_vlans = 0, bkt;
1986
1987 hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
1988 if (f->vlan >= 0 && f->vlan <= I40E_MAX_VLANID)
1989 num_vlans++;
1990 }
1991
1992 return num_vlans;
1993 }
1994
1995
1996
1997
1998
1999
2000
2001
2002
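/* i40e_vc_config_promiscuous_mode_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * called from the VF to configure promiscuous mode on the VF VSIs
 */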
2003 static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf, u8 *msg)
2004 {
2005 struct virtchnl_promisc_info *info =
2006 (struct virtchnl_promisc_info *)msg;
2007 struct i40e_pf *pf = vf->pf;
2008 i40e_status aq_ret = 0;
2009 bool allmulti = false;
2010 bool alluni = false;
2011
2012 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
2013 aq_ret = I40E_ERR_PARAM;
2014 goto err_out;
2015 }
2016 if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) {
2017 dev_err(&pf->pdev->dev,
2018 "Unprivileged VF %d is attempting to configure promiscuous mode\n",
2019 vf->vf_id);
2020
2021
2022
2023
2024 aq_ret = 0;
2025 goto err_out;
2026 }
2027
2028 if (info->flags > I40E_MAX_VF_PROMISC_FLAGS) {
2029 aq_ret = I40E_ERR_PARAM;
2030 goto err_out;
2031 }
2032
2033 if (!i40e_vc_isvalid_vsi_id(vf, info->vsi_id)) {
2034 aq_ret = I40E_ERR_PARAM;
2035 goto err_out;
2036 }
2037
2038
2039 if (info->flags & FLAG_VF_MULTICAST_PROMISC)
2040 allmulti = true;
2041
2042 if (info->flags & FLAG_VF_UNICAST_PROMISC)
2043 alluni = true;
2044 aq_ret = i40e_config_vf_promiscuous_mode(vf, info->vsi_id, allmulti,
2045 alluni);
2046 if (aq_ret)
2047 goto err_out;
2048
2049 if (allmulti) {
2050 if (!test_and_set_bit(I40E_VF_STATE_MC_PROMISC,
2051 &vf->vf_states))
2052 dev_info(&pf->pdev->dev,
2053 "VF %d successfully set multicast promiscuous mode\n",
2054 vf->vf_id);
2055 } else if (test_and_clear_bit(I40E_VF_STATE_MC_PROMISC,
2056 &vf->vf_states))
2057 dev_info(&pf->pdev->dev,
2058 "VF %d successfully unset multicast promiscuous mode\n",
2059 vf->vf_id);
2060
2061 if (alluni) {
2062 if (!test_and_set_bit(I40E_VF_STATE_UC_PROMISC,
2063 &vf->vf_states))
2064 dev_info(&pf->pdev->dev,
2065 "VF %d successfully set unicast promiscuous mode\n",
2066 vf->vf_id);
2067 } else if (test_and_clear_bit(I40E_VF_STATE_UC_PROMISC,
2068 &vf->vf_states))
2069 dev_info(&pf->pdev->dev,
2070 "VF %d successfully unset unicast promiscuous mode\n",
2071 vf->vf_id);
2072
2073 err_out:
2074
2075 return i40e_vc_send_resp_to_vf(vf,
2076 VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
2077 aq_ret);
2078 }
2079
2080
2081
2082
2083
2084
2085
2086
2087
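/* i40e_vc_config_queues_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * called from the VF to configure the rx/tx queues
 */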
2088 static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg)
2089 {
2090 struct virtchnl_vsi_queue_config_info *qci =
2091 (struct virtchnl_vsi_queue_config_info *)msg;
2092 struct virtchnl_queue_pair_info *qpi;
2093 struct i40e_pf *pf = vf->pf;
2094 u16 vsi_id, vsi_queue_id = 0;
2095 u16 num_qps_all = 0;
2096 i40e_status aq_ret = 0;
2097 int i, j = 0, idx = 0;
2098
2099 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
2100 aq_ret = I40E_ERR_PARAM;
2101 goto error_param;
2102 }
2103
2104 if (!i40e_vc_isvalid_vsi_id(vf, qci->vsi_id)) {
2105 aq_ret = I40E_ERR_PARAM;
2106 goto error_param;
2107 }
2108
2109 if (qci->num_queue_pairs > I40E_MAX_VF_QUEUES) {
2110 aq_ret = I40E_ERR_PARAM;
2111 goto error_param;
2112 }
2113
2114 if (vf->adq_enabled) {
2115 for (i = 0; i < I40E_MAX_VF_VSI; i++)
2116 num_qps_all += vf->ch[i].num_qps;
2117 if (num_qps_all != qci->num_queue_pairs) {
2118 aq_ret = I40E_ERR_PARAM;
2119 goto error_param;
2120 }
2121 }
2122
2123 vsi_id = qci->vsi_id;
2124
2125 for (i = 0; i < qci->num_queue_pairs; i++) {
2126 qpi = &qci->qpair[i];
2127
2128 if (!vf->adq_enabled) {
2129 if (!i40e_vc_isvalid_queue_id(vf, vsi_id,
2130 qpi->txq.queue_id)) {
2131 aq_ret = I40E_ERR_PARAM;
2132 goto error_param;
2133 }
2134
2135 vsi_queue_id = qpi->txq.queue_id;
2136
2137 if (qpi->txq.vsi_id != qci->vsi_id ||
2138 qpi->rxq.vsi_id != qci->vsi_id ||
2139 qpi->rxq.queue_id != vsi_queue_id) {
2140 aq_ret = I40E_ERR_PARAM;
2141 goto error_param;
2142 }
2143 }
2144
2145 if (vf->adq_enabled) {
2146 if (idx >= ARRAY_SIZE(vf->ch)) {
2147 aq_ret = I40E_ERR_NO_AVAILABLE_VSI;
2148 goto error_param;
2149 }
2150 vsi_id = vf->ch[idx].vsi_id;
2151 }
2152
2153 if (i40e_config_vsi_rx_queue(vf, vsi_id, vsi_queue_id,
2154 &qpi->rxq) ||
2155 i40e_config_vsi_tx_queue(vf, vsi_id, vsi_queue_id,
2156 &qpi->txq)) {
2157 aq_ret = I40E_ERR_PARAM;
2158 goto error_param;
2159 }
2160
2161
2162
2163
2164
2165
2166 if (vf->adq_enabled) {
2167 if (idx >= ARRAY_SIZE(vf->ch)) {
2168 aq_ret = I40E_ERR_NO_AVAILABLE_VSI;
2169 goto error_param;
2170 }
2171 if (j == (vf->ch[idx].num_qps - 1)) {
2172 idx++;
2173 j = 0;
2174 vsi_queue_id = 0;
2175 } else {
2176 j++;
2177 vsi_queue_id++;
2178 }
2179 }
2180 }
2181
2182 if (!vf->adq_enabled) {
2183 pf->vsi[vf->lan_vsi_idx]->num_queue_pairs =
2184 qci->num_queue_pairs;
2185 } else {
2186 for (i = 0; i < vf->num_tc; i++)
2187 pf->vsi[vf->ch[i].vsi_idx]->num_queue_pairs =
2188 vf->ch[i].num_qps;
2189 }
2190
2191 error_param:
2192
2193 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES,
2194 aq_ret);
2195 }
2196
2197
2198
2199
2200
2201
2202
2203
2204 static int i40e_validate_queue_map(struct i40e_vf *vf, u16 vsi_id,
2205 unsigned long queuemap)
2206 {
2207 u16 vsi_queue_id, queue_id;
2208
2209 for_each_set_bit(vsi_queue_id, &queuemap, I40E_MAX_VSI_QP) {
2210 if (vf->adq_enabled) {
2211 vsi_id = vf->ch[vsi_queue_id / I40E_MAX_VF_VSI].vsi_id;
2212 queue_id = (vsi_queue_id % I40E_DEFAULT_QUEUES_PER_VF);
2213 } else {
2214 queue_id = vsi_queue_id;
2215 }
2216
2217 if (!i40e_vc_isvalid_queue_id(vf, vsi_id, queue_id))
2218 return -EINVAL;
2219 }
2220
2221 return 0;
2222 }
2223
2224
2225
2226
2227
2228
2229
2230
2231
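/* i40e_vc_config_irq_map_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * called from the VF to configure the irq to queue map
 */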
2232 static int i40e_vc_config_irq_map_msg(struct i40e_vf *vf, u8 *msg)
2233 {
2234 struct virtchnl_irq_map_info *irqmap_info =
2235 (struct virtchnl_irq_map_info *)msg;
2236 struct virtchnl_vector_map *map;
2237 u16 vsi_id;
2238 i40e_status aq_ret = 0;
2239 int i;
2240
2241 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
2242 aq_ret = I40E_ERR_PARAM;
2243 goto error_param;
2244 }
2245
2246 if (irqmap_info->num_vectors >
2247 vf->pf->hw.func_caps.num_msix_vectors_vf) {
2248 aq_ret = I40E_ERR_PARAM;
2249 goto error_param;
2250 }
2251
2252 for (i = 0; i < irqmap_info->num_vectors; i++) {
2253 map = &irqmap_info->vecmap[i];
2254
2255 if (!i40e_vc_isvalid_vector_id(vf, map->vector_id) ||
2256 !i40e_vc_isvalid_vsi_id(vf, map->vsi_id)) {
2257 aq_ret = I40E_ERR_PARAM;
2258 goto error_param;
2259 }
2260 vsi_id = map->vsi_id;
2261
2262 if (i40e_validate_queue_map(vf, vsi_id, map->rxq_map)) {
2263 aq_ret = I40E_ERR_PARAM;
2264 goto error_param;
2265 }
2266
2267 if (i40e_validate_queue_map(vf, vsi_id, map->txq_map)) {
2268 aq_ret = I40E_ERR_PARAM;
2269 goto error_param;
2270 }
2271
2272 i40e_config_irq_link_list(vf, vsi_id, map);
2273 }
2274 error_param:
2275
2276 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_IRQ_MAP,
2277 aq_ret);
2278 }
2279
2280
2281
2282
2283
2284
2285
2286 static int i40e_ctrl_vf_tx_rings(struct i40e_vsi *vsi, unsigned long q_map,
2287 bool enable)
2288 {
2289 struct i40e_pf *pf = vsi->back;
2290 int ret = 0;
2291 u16 q_id;
2292
2293 for_each_set_bit(q_id, &q_map, I40E_MAX_VF_QUEUES) {
2294 ret = i40e_control_wait_tx_q(vsi->seid, pf,
2295 vsi->base_queue + q_id,
296 false /* is xdp */, enable);
2297 if (ret)
2298 break;
2299 }
2300 return ret;
2301 }
2302
2303
2304
2305
2306
2307
2308
2309 static int i40e_ctrl_vf_rx_rings(struct i40e_vsi *vsi, unsigned long q_map,
2310 bool enable)
2311 {
2312 struct i40e_pf *pf = vsi->back;
2313 int ret = 0;
2314 u16 q_id;
2315
2316 for_each_set_bit(q_id, &q_map, I40E_MAX_VF_QUEUES) {
2317 ret = i40e_control_wait_rx_q(pf, vsi->base_queue + q_id,
2318 enable);
2319 if (ret)
2320 break;
2321 }
2322 return ret;
2323 }
2324
2325
2326
2327
2328
2329
2330
2331 static bool i40e_vc_validate_vqs_bitmaps(struct virtchnl_queue_select *vqs)
2332 {
2333 if ((!vqs->rx_queues && !vqs->tx_queues) ||
2334 vqs->rx_queues >= BIT(I40E_MAX_VF_QUEUES) ||
2335 vqs->tx_queues >= BIT(I40E_MAX_VF_QUEUES))
2336 return false;
2337
2338 return true;
2339 }
2340
2341
2342
2343
2344
2345
2346
2347
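/* i40e_vc_enable_queues_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * called from the VF to enable all or specific queue(s)
 */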
2348 static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg)
2349 {
2350 struct virtchnl_queue_select *vqs =
2351 (struct virtchnl_queue_select *)msg;
2352 struct i40e_pf *pf = vf->pf;
2353 i40e_status aq_ret = 0;
2354 int i;
2355
2356 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
2357 aq_ret = I40E_ERR_PARAM;
2358 goto error_param;
2359 }
2360
2361 if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
2362 aq_ret = I40E_ERR_PARAM;
2363 goto error_param;
2364 }
2365
2366 if (!i40e_vc_validate_vqs_bitmaps(vqs)) {
2367 aq_ret = I40E_ERR_PARAM;
2368 goto error_param;
2369 }
2370
2371
2372 if (i40e_ctrl_vf_rx_rings(pf->vsi[vf->lan_vsi_idx], vqs->rx_queues,
2373 true)) {
2374 aq_ret = I40E_ERR_TIMEOUT;
2375 goto error_param;
2376 }
2377 if (i40e_ctrl_vf_tx_rings(pf->vsi[vf->lan_vsi_idx], vqs->tx_queues,
2378 true)) {
2379 aq_ret = I40E_ERR_TIMEOUT;
2380 goto error_param;
2381 }
2382
2383
2384 if (vf->adq_enabled) {
2385
2386 for (i = 1; i < vf->num_tc; i++) {
2387 if (i40e_vsi_start_rings(pf->vsi[vf->ch[i].vsi_idx]))
2388 aq_ret = I40E_ERR_TIMEOUT;
2389 }
2390 }
2391
2392 vf->queues_enabled = true;
2393
2394 error_param:
2395
2396 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ENABLE_QUEUES,
2397 aq_ret);
2398 }
2399
2400
2401
2402
2403
2404
2405
2406
2407
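/* i40e_vc_disable_queues_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * called from the VF to disable all or specific queue(s)
 */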
2408 static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg)
2409 {
2410 struct virtchnl_queue_select *vqs =
2411 (struct virtchnl_queue_select *)msg;
2412 struct i40e_pf *pf = vf->pf;
2413 i40e_status aq_ret = 0;
2414
2415
2416 vf->queues_enabled = false;
2417
2418 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
2419 aq_ret = I40E_ERR_PARAM;
2420 goto error_param;
2421 }
2422
2423 if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
2424 aq_ret = I40E_ERR_PARAM;
2425 goto error_param;
2426 }
2427
2428 if (!i40e_vc_validate_vqs_bitmaps(vqs)) {
2429 aq_ret = I40E_ERR_PARAM;
2430 goto error_param;
2431 }
2432
2433
2434 if (i40e_ctrl_vf_tx_rings(pf->vsi[vf->lan_vsi_idx], vqs->tx_queues,
2435 false)) {
2436 aq_ret = I40E_ERR_TIMEOUT;
2437 goto error_param;
2438 }
2439 if (i40e_ctrl_vf_rx_rings(pf->vsi[vf->lan_vsi_idx], vqs->rx_queues,
2440 false)) {
2441 aq_ret = I40E_ERR_TIMEOUT;
2442 goto error_param;
2443 }
2444 error_param:
2445
2446 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DISABLE_QUEUES,
2447 aq_ret);
2448 }
2449
2450
2451
2452
2453
2454
2455
2456
2457
2458
2459
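/* i40e_vc_request_queues_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * VFs get a default number of queues but can use this message to request a
 * different number.  If the request is successful, PF will reset the VF and
 * return 0.  If unsuccessful, PF will send a message informing the VF of the
 * number of available queues and return the result of sending that message.
 */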
2460 static int i40e_vc_request_queues_msg(struct i40e_vf *vf, u8 *msg)
2461 {
2462 struct virtchnl_vf_res_request *vfres =
2463 (struct virtchnl_vf_res_request *)msg;
2464 u16 req_pairs = vfres->num_queue_pairs;
2465 u8 cur_pairs = vf->num_queue_pairs;
2466 struct i40e_pf *pf = vf->pf;
2467
2468 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states))
2469 return -EINVAL;
2470
2471 if (req_pairs > I40E_MAX_VF_QUEUES) {
2472 dev_err(&pf->pdev->dev,
2473 "VF %d tried to request more than %d queues.\n",
2474 vf->vf_id,
2475 I40E_MAX_VF_QUEUES);
2476 vfres->num_queue_pairs = I40E_MAX_VF_QUEUES;
2477 } else if (req_pairs - cur_pairs > pf->queues_left) {
2478 dev_warn(&pf->pdev->dev,
2479 "VF %d requested %d more queues, but only %d left.\n",
2480 vf->vf_id,
2481 req_pairs - cur_pairs,
2482 pf->queues_left);
2483 vfres->num_queue_pairs = pf->queues_left + cur_pairs;
2484 } else {
2485
2486 vf->num_req_queues = req_pairs;
2487 i40e_vc_notify_vf_reset(vf);
2488 i40e_reset_vf(vf, false);
2489 return 0;
2490 }
2491
2492 return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_REQUEST_QUEUES, 0,
2493 (u8 *)vfres, sizeof(*vfres));
2494 }
2495
2496
2497
2498
2499
2500
2501
2502
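/**
 * i40e_vc_get_stats_msg - report VSI statistics to the VF
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * Refreshes the Ethernet statistics of the VF's LAN VSI and sends them
 * back over virtchnl.
 */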
2503 static int i40e_vc_get_stats_msg(struct i40e_vf *vf, u8 *msg)
2504 {
2505 struct virtchnl_queue_select *vqs =
2506 (struct virtchnl_queue_select *)msg;
2507 struct i40e_pf *pf = vf->pf;
2508 struct i40e_eth_stats stats;
2509 i40e_status aq_ret = 0;
2510 struct i40e_vsi *vsi;
2511
2512 memset(&stats, 0, sizeof(struct i40e_eth_stats));
2513
2514 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
2515 aq_ret = I40E_ERR_PARAM;
2516 goto error_param;
2517 }
2518
2519 if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
2520 aq_ret = I40E_ERR_PARAM;
2521 goto error_param;
2522 }
2523
2524 vsi = pf->vsi[vf->lan_vsi_idx];
2525 if (!vsi) {
2526 aq_ret = I40E_ERR_PARAM;
2527 goto error_param;
2528 }
2529 i40e_update_eth_stats(vsi);
2530 stats = vsi->eth_stats;
2531
2532 error_param:
2533
2534 return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_STATS, aq_ret,
2535 (u8 *)&stats, sizeof(stats));
2536 }
2537
2538
2539
2540
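/* Filter limits applied to untrusted VFs: 16 VLANs and 16 unicast MAC
 * addresses (the two extra MAC slots are assumed here to account for the
 * VF's own default address and broadcast).
 */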
2541 #define I40E_VC_MAX_MAC_ADDR_PER_VF (16 + 1 + 1)
2542 #define I40E_VC_MAX_VLAN_PER_VF 16
2543
2544
2545
2546
2547
2548
2549
2550
2551
2552
2553
2554
2555
2556
2557
2558
2559
2560
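/**
 * i40e_check_vf_permission - check permission for a MAC filter request
 * @vf: pointer to the VF info
 * @al: MAC address list from virtchnl
 *
 * Rejects the request if an untrusted VF would exceed its MAC filter
 * limit, if any address is broadcast or zero, or if an untrusted VF
 * tries to override a unicast address set administratively by the PF.
 * Returns 0 when the whole list is acceptable.
 */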
2561 static inline int i40e_check_vf_permission(struct i40e_vf *vf,
2562 struct virtchnl_ether_addr_list *al)
2563 {
2564 struct i40e_pf *pf = vf->pf;
2565 int i;
2566
2567
2568
2569
2570
2571 if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) &&
2572 (vf->num_mac + al->num_elements) > I40E_VC_MAX_MAC_ADDR_PER_VF) {
2573 dev_err(&pf->pdev->dev,
2574 "Cannot add more MAC addresses, VF is not trusted, switch the VF to trusted to add more functionality\n");
2575 return -EPERM;
2576 }
2577
2578 for (i = 0; i < al->num_elements; i++) {
2579 u8 *addr = al->list[i].addr;
2580
2581 if (is_broadcast_ether_addr(addr) ||
2582 is_zero_ether_addr(addr)) {
2583 dev_err(&pf->pdev->dev, "invalid VF MAC addr %pM\n",
2584 addr);
2585 return I40E_ERR_INVALID_MAC_ADDR;
2586 }
2587
2588
2589
2590
2591
2592
2593
2594
2595 if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) &&
2596 !is_multicast_ether_addr(addr) && vf->pf_set_mac &&
2597 !ether_addr_equal(addr, vf->default_lan_addr.addr)) {
2598 dev_err(&pf->pdev->dev,
2599 "VF attempting to override administratively set MAC address, bring down and up the VF interface to resume normal operation\n");
2600 return -EPERM;
2601 }
2602 }
2603
2604 return 0;
2605 }
2606
2607
2608
2609
2610
2611
2612
2613
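/**
 * i40e_vc_add_mac_addr_msg - add MAC address filters for the VF
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * Adds the requested filters to the VF's LAN VSI, programs them to the
 * hardware and replies to the VF with the result.
 */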
2614 static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg)
2615 {
2616 struct virtchnl_ether_addr_list *al =
2617 (struct virtchnl_ether_addr_list *)msg;
2618 struct i40e_pf *pf = vf->pf;
2619 struct i40e_vsi *vsi = NULL;
2620 i40e_status ret = 0;
2621 int i;
2622
2623 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
2624 !i40e_vc_isvalid_vsi_id(vf, al->vsi_id)) {
2625 ret = I40E_ERR_PARAM;
2626 goto error_param;
2627 }
2628
2629 vsi = pf->vsi[vf->lan_vsi_idx];
2630
2631
2632
2633
2634 spin_lock_bh(&vsi->mac_filter_hash_lock);
2635
2636 ret = i40e_check_vf_permission(vf, al);
2637 if (ret) {
2638 spin_unlock_bh(&vsi->mac_filter_hash_lock);
2639 goto error_param;
2640 }
2641
2642
2643 for (i = 0; i < al->num_elements; i++) {
2644 struct i40e_mac_filter *f;
2645
2646 f = i40e_find_mac(vsi, al->list[i].addr);
2647 if (!f) {
2648 f = i40e_add_mac_filter(vsi, al->list[i].addr);
2649
2650 if (!f) {
2651 dev_err(&pf->pdev->dev,
2652 "Unable to add MAC filter %pM for VF %d\n",
2653 al->list[i].addr, vf->vf_id);
2654 ret = I40E_ERR_PARAM;
2655 spin_unlock_bh(&vsi->mac_filter_hash_lock);
2656 goto error_param;
2657 } else {
2658 vf->num_mac++;
2659 }
2660 }
2661 }
2662 spin_unlock_bh(&vsi->mac_filter_hash_lock);
2663
2664
2665 ret = i40e_sync_vsi_filters(vsi);
2666 if (ret)
2667 dev_err(&pf->pdev->dev, "Unable to program VF %d MAC filters, error %d\n",
2668 vf->vf_id, ret);
2669
2670 error_param:
2671
2672 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ADD_ETH_ADDR,
2673 ret);
2674 }
2675
2676
2677
2678
2679
2680
2681
2682
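/**
 * i40e_vc_del_mac_addr_msg - remove MAC address filters for the VF
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * Deletes the requested filters from the VF's LAN VSI, except for a
 * default address that was set by the PF, and replies to the VF.
 */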
2683 static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg)
2684 {
2685 struct virtchnl_ether_addr_list *al =
2686 (struct virtchnl_ether_addr_list *)msg;
2687 struct i40e_pf *pf = vf->pf;
2688 struct i40e_vsi *vsi = NULL;
2689 i40e_status ret = 0;
2690 int i;
2691
2692 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
2693 !i40e_vc_isvalid_vsi_id(vf, al->vsi_id)) {
2694 ret = I40E_ERR_PARAM;
2695 goto error_param;
2696 }
2697
2698 for (i = 0; i < al->num_elements; i++) {
2699 if (is_broadcast_ether_addr(al->list[i].addr) ||
2700 is_zero_ether_addr(al->list[i].addr)) {
2701 dev_err(&pf->pdev->dev, "Invalid MAC addr %pM for VF %d\n",
2702 al->list[i].addr, vf->vf_id);
2703 ret = I40E_ERR_INVALID_MAC_ADDR;
2704 goto error_param;
2705 }
2706
2707 if (vf->pf_set_mac &&
2708 ether_addr_equal(al->list[i].addr,
2709 vf->default_lan_addr.addr)) {
2710 dev_err(&pf->pdev->dev,
2711 "MAC addr %pM has been set by PF, cannot delete it for VF %d, reset VF to change MAC addr\n",
2712 vf->default_lan_addr.addr, vf->vf_id);
2713 ret = I40E_ERR_PARAM;
2714 goto error_param;
2715 }
2716 }
2717 vsi = pf->vsi[vf->lan_vsi_idx];
2718
2719 spin_lock_bh(&vsi->mac_filter_hash_lock);
2720
2721 for (i = 0; i < al->num_elements; i++)
2722 if (i40e_del_mac_filter(vsi, al->list[i].addr)) {
2723 ret = I40E_ERR_INVALID_MAC_ADDR;
2724 spin_unlock_bh(&vsi->mac_filter_hash_lock);
2725 goto error_param;
2726 } else {
2727 vf->num_mac--;
2728 }
2729
2730 spin_unlock_bh(&vsi->mac_filter_hash_lock);
2731
2732
2733 ret = i40e_sync_vsi_filters(vsi);
2734 if (ret)
2735 dev_err(&pf->pdev->dev, "Unable to program VF %d MAC filters, error %d\n",
2736 vf->vf_id, ret);
2737
2738 error_param:
2739
2740 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DEL_ETH_ADDR,
2741 ret);
2742 }
2743
2744
2745
2746
2747
2748
2749
2750
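/**
 * i40e_vc_add_vlan_msg - add VLAN filters for the VF
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * Validates the requested VLAN ids, enables VLAN stripping on the VF's
 * LAN VSI and adds the VLAN filters, mirroring any promiscuous settings
 * onto the new VLANs. Rejected when a port VLAN is configured.
 */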
2751 static int i40e_vc_add_vlan_msg(struct i40e_vf *vf, u8 *msg)
2752 {
2753 struct virtchnl_vlan_filter_list *vfl =
2754 (struct virtchnl_vlan_filter_list *)msg;
2755 struct i40e_pf *pf = vf->pf;
2756 struct i40e_vsi *vsi = NULL;
2757 i40e_status aq_ret = 0;
2758 int i;
2759
2760 if ((vf->num_vlan >= I40E_VC_MAX_VLAN_PER_VF) &&
2761 !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) {
2762 dev_err(&pf->pdev->dev,
2763 "VF is not trusted, switch the VF to trusted to add more VLAN addresses\n");
2764 goto error_param;
2765 }
2766 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
2767 !i40e_vc_isvalid_vsi_id(vf, vfl->vsi_id)) {
2768 aq_ret = I40E_ERR_PARAM;
2769 goto error_param;
2770 }
2771
2772 for (i = 0; i < vfl->num_elements; i++) {
2773 if (vfl->vlan_id[i] > I40E_MAX_VLANID) {
2774 aq_ret = I40E_ERR_PARAM;
2775 dev_err(&pf->pdev->dev,
2776 "invalid VF VLAN id %d\n", vfl->vlan_id[i]);
2777 goto error_param;
2778 }
2779 }
2780 vsi = pf->vsi[vf->lan_vsi_idx];
2781 if (vsi->info.pvid) {
2782 aq_ret = I40E_ERR_PARAM;
2783 goto error_param;
2784 }
2785
2786 i40e_vlan_stripping_enable(vsi);
2787 for (i = 0; i < vfl->num_elements; i++) {
2788
2789 int ret = i40e_vsi_add_vlan(vsi, vfl->vlan_id[i]);
2790 if (!ret)
2791 vf->num_vlan++;
2792
2793 if (test_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states))
2794 i40e_aq_set_vsi_uc_promisc_on_vlan(&pf->hw, vsi->seid,
2795 true,
2796 vfl->vlan_id[i],
2797 NULL);
2798 if (test_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states))
2799 i40e_aq_set_vsi_mc_promisc_on_vlan(&pf->hw, vsi->seid,
2800 true,
2801 vfl->vlan_id[i],
2802 NULL);
2803
2804 if (ret)
2805 dev_err(&pf->pdev->dev,
2806 "Unable to add VLAN filter %d for VF %d, error %d\n",
2807 vfl->vlan_id[i], vf->vf_id, ret);
2808 }
2809
2810 error_param:
2811
2812 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ADD_VLAN, aq_ret);
2813 }
2814
2815
2816
2817
2818
2819
2820
2821
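/**
 * i40e_vc_remove_vlan_msg - remove VLAN filters for the VF
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * Removes the requested VLAN filters from the VF's LAN VSI along with
 * any per-VLAN promiscuous settings, then replies to the VF.
 */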
2822 static int i40e_vc_remove_vlan_msg(struct i40e_vf *vf, u8 *msg)
2823 {
2824 struct virtchnl_vlan_filter_list *vfl =
2825 (struct virtchnl_vlan_filter_list *)msg;
2826 struct i40e_pf *pf = vf->pf;
2827 struct i40e_vsi *vsi = NULL;
2828 i40e_status aq_ret = 0;
2829 int i;
2830
2831 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
2832 !i40e_vc_isvalid_vsi_id(vf, vfl->vsi_id)) {
2833 aq_ret = I40E_ERR_PARAM;
2834 goto error_param;
2835 }
2836
2837 for (i = 0; i < vfl->num_elements; i++) {
2838 if (vfl->vlan_id[i] > I40E_MAX_VLANID) {
2839 aq_ret = I40E_ERR_PARAM;
2840 goto error_param;
2841 }
2842 }
2843
2844 vsi = pf->vsi[vf->lan_vsi_idx];
2845 if (vsi->info.pvid) {
2846 if (vfl->num_elements > 1 || vfl->vlan_id[0])
2847 aq_ret = I40E_ERR_PARAM;
2848 goto error_param;
2849 }
2850
2851 for (i = 0; i < vfl->num_elements; i++) {
2852 i40e_vsi_kill_vlan(vsi, vfl->vlan_id[i]);
2853 vf->num_vlan--;
2854
2855 if (test_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states))
2856 i40e_aq_set_vsi_uc_promisc_on_vlan(&pf->hw, vsi->seid,
2857 false,
2858 vfl->vlan_id[i],
2859 NULL);
2860 if (test_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states))
2861 i40e_aq_set_vsi_mc_promisc_on_vlan(&pf->hw, vsi->seid,
2862 false,
2863 vfl->vlan_id[i],
2864 NULL);
2865 }
2866
2867 error_param:
2868
2869 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DEL_VLAN, aq_ret);
2870 }
2871
2872
2873
2874
2875
2876
2877
2878
2879
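/**
 * i40e_vc_iwarp_msg - forward an iWARP message from the VF
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * Passes the VF's iWARP (RDMA) message on to the registered client
 * driver and replies to the VF with the result.
 */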
2880 static int i40e_vc_iwarp_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
2881 {
2882 struct i40e_pf *pf = vf->pf;
2883 int abs_vf_id = vf->vf_id + pf->hw.func_caps.vf_base_id;
2884 i40e_status aq_ret = 0;
2885
2886 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
2887 !test_bit(I40E_VF_STATE_IWARPENA, &vf->vf_states)) {
2888 aq_ret = I40E_ERR_PARAM;
2889 goto error_param;
2890 }
2891
2892 i40e_notify_client_of_vf_msg(pf->vsi[pf->lan_vsi], abs_vf_id,
2893 msg, msglen);
2894
2895 error_param:
2896
2897 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_IWARP,
2898 aq_ret);
2899 }
2900
2901
2902
2903
2904
2905
2906
2907
2908
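/**
 * i40e_vc_iwarp_qvmap_msg - configure or release the iWARP queue-vector map
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 * @config: true to configure the queue-vector list, false to release it
 */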
2909 static int i40e_vc_iwarp_qvmap_msg(struct i40e_vf *vf, u8 *msg, bool config)
2910 {
2911 struct virtchnl_iwarp_qvlist_info *qvlist_info =
2912 (struct virtchnl_iwarp_qvlist_info *)msg;
2913 i40e_status aq_ret = 0;
2914
2915 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
2916 !test_bit(I40E_VF_STATE_IWARPENA, &vf->vf_states)) {
2917 aq_ret = I40E_ERR_PARAM;
2918 goto error_param;
2919 }
2920
2921 if (config) {
2922 if (i40e_config_iwarp_qvlist(vf, qvlist_info))
2923 aq_ret = I40E_ERR_PARAM;
2924 } else {
2925 i40e_release_iwarp_qvlist(vf);
2926 }
2927
2928 error_param:
2929
2930 return i40e_vc_send_resp_to_vf(vf,
2931 config ? VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP :
2932 VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP,
2933 aq_ret);
2934 }
2935
2936
2937
2938
2939
2940
2941
2942
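/**
 * i40e_vc_config_rss_key - configure the VF's RSS hash key
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * Programs the key supplied by the VF onto its LAN VSI after checking
 * that the key length matches I40E_HKEY_ARRAY_SIZE.
 */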
2943 static int i40e_vc_config_rss_key(struct i40e_vf *vf, u8 *msg)
2944 {
2945 struct virtchnl_rss_key *vrk =
2946 (struct virtchnl_rss_key *)msg;
2947 struct i40e_pf *pf = vf->pf;
2948 struct i40e_vsi *vsi = NULL;
2949 i40e_status aq_ret = 0;
2950
2951 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
2952 !i40e_vc_isvalid_vsi_id(vf, vrk->vsi_id) ||
2953 (vrk->key_len != I40E_HKEY_ARRAY_SIZE)) {
2954 aq_ret = I40E_ERR_PARAM;
2955 goto err;
2956 }
2957
2958 vsi = pf->vsi[vf->lan_vsi_idx];
2959 aq_ret = i40e_config_rss(vsi, vrk->key, NULL, 0);
2960 err:
2961
2962 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_KEY,
2963 aq_ret);
2964 }
2965
2966
2967
2968
2969
2970
2971
2972
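/**
 * i40e_vc_config_rss_lut - configure the VF's RSS lookup table
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * Programs the lookup table supplied by the VF; every entry must
 * reference a queue the VF actually owns.
 */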
2973 static int i40e_vc_config_rss_lut(struct i40e_vf *vf, u8 *msg)
2974 {
2975 struct virtchnl_rss_lut *vrl =
2976 (struct virtchnl_rss_lut *)msg;
2977 struct i40e_pf *pf = vf->pf;
2978 struct i40e_vsi *vsi = NULL;
2979 i40e_status aq_ret = 0;
2980 u16 i;
2981
2982 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
2983 !i40e_vc_isvalid_vsi_id(vf, vrl->vsi_id) ||
2984 (vrl->lut_entries != I40E_VF_HLUT_ARRAY_SIZE)) {
2985 aq_ret = I40E_ERR_PARAM;
2986 goto err;
2987 }
2988
2989 for (i = 0; i < vrl->lut_entries; i++)
2990 if (vrl->lut[i] >= vf->num_queue_pairs) {
2991 aq_ret = I40E_ERR_PARAM;
2992 goto err;
2993 }
2994
2995 vsi = pf->vsi[vf->lan_vsi_idx];
2996 aq_ret = i40e_config_rss(vsi, NULL, vrl->lut, I40E_VF_HLUT_ARRAY_SIZE);
2997
2998 err:
2999 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_LUT,
3000 aq_ret);
3001 }
3002
3003
3004
3005
3006
3007
3008
3009
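/**
 * i40e_vc_get_rss_hena - report RSS hash-enable capabilities to the VF
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * Replies with the PF's default RSS HENA value.
 */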
3010 static int i40e_vc_get_rss_hena(struct i40e_vf *vf, u8 *msg)
3011 {
3012 struct virtchnl_rss_hena *vrh = NULL;
3013 struct i40e_pf *pf = vf->pf;
3014 i40e_status aq_ret = 0;
3015 int len = 0;
3016
3017 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
3018 aq_ret = I40E_ERR_PARAM;
3019 goto err;
3020 }
3021 len = sizeof(struct virtchnl_rss_hena);
3022
3023 vrh = kzalloc(len, GFP_KERNEL);
3024 if (!vrh) {
3025 aq_ret = I40E_ERR_NO_MEMORY;
3026 len = 0;
3027 goto err;
3028 }
3029 vrh->hena = i40e_pf_get_default_rss_hena(pf);
3030 err:
3031
3032 aq_ret = i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_RSS_HENA_CAPS,
3033 aq_ret, (u8 *)vrh, len);
3034 kfree(vrh);
3035 return aq_ret;
3036 }
3037
3038
3039
3040
3041
3042
3043
3044
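/**
 * i40e_vc_set_rss_hena - program RSS hash-enable bits for the VF
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * Writes the 64-bit HENA value requested by the VF into its VFQF_HENA1
 * registers.
 */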
3045 static int i40e_vc_set_rss_hena(struct i40e_vf *vf, u8 *msg)
3046 {
3047 struct virtchnl_rss_hena *vrh =
3048 (struct virtchnl_rss_hena *)msg;
3049 struct i40e_pf *pf = vf->pf;
3050 struct i40e_hw *hw = &pf->hw;
3051 i40e_status aq_ret = 0;
3052
3053 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
3054 aq_ret = I40E_ERR_PARAM;
3055 goto err;
3056 }
3057 i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(0, vf->vf_id), (u32)vrh->hena);
3058 i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(1, vf->vf_id),
3059 (u32)(vrh->hena >> 32));
3060
3061
3062 err:
3063 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_SET_RSS_HENA, aq_ret);
3064 }
3065
3066
3067
3068
3069
3070
3071
3072
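/**
 * i40e_vc_enable_vlan_stripping - enable VLAN header stripping for the VF
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 */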
3073 static int i40e_vc_enable_vlan_stripping(struct i40e_vf *vf, u8 *msg)
3074 {
3075 i40e_status aq_ret = 0;
3076 struct i40e_vsi *vsi;
3077
3078 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
3079 aq_ret = I40E_ERR_PARAM;
3080 goto err;
3081 }
3082
3083 vsi = vf->pf->vsi[vf->lan_vsi_idx];
3084 i40e_vlan_stripping_enable(vsi);
3085
3086
3087 err:
3088 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ENABLE_VLAN_STRIPPING,
3089 aq_ret);
3090 }
3091
3092
3093
3094
3095
3096
3097
3098
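/**
 * i40e_vc_disable_vlan_stripping - disable VLAN header stripping for the VF
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 */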
3099 static int i40e_vc_disable_vlan_stripping(struct i40e_vf *vf, u8 *msg)
3100 {
3101 i40e_status aq_ret = 0;
3102 struct i40e_vsi *vsi;
3103
3104 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
3105 aq_ret = I40E_ERR_PARAM;
3106 goto err;
3107 }
3108
3109 vsi = vf->pf->vsi[vf->lan_vsi_idx];
3110 i40e_vlan_stripping_disable(vsi);
3111
3112
3113 err:
3114 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING,
3115 aq_ret);
3116 }
3117
3118
3119
3120
3121
3122
3123
3124
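/**
 * i40e_validate_cloud_filter - validate an ADq cloud filter from the VF
 * @vf: pointer to the VF info
 * @tc_filter: cloud filter request from virtchnl
 *
 * Checks the action, target TC, flow type and the MAC/VLAN/port fields
 * of the request. An untrusted VF may only add filters keyed on its own
 * destination MAC (and optionally VLAN); anything else requires trust.
 * Returns I40E_SUCCESS if the filter is acceptable, I40E_ERR_CONFIG
 * otherwise.
 */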
3125 static int i40e_validate_cloud_filter(struct i40e_vf *vf,
3126 struct virtchnl_filter *tc_filter)
3127 {
3128 struct virtchnl_l4_spec mask = tc_filter->mask.tcp_spec;
3129 struct virtchnl_l4_spec data = tc_filter->data.tcp_spec;
3130 struct i40e_pf *pf = vf->pf;
3131 struct i40e_vsi *vsi = NULL;
3132 struct i40e_mac_filter *f;
3133 struct hlist_node *h;
3134 bool found = false;
3135 int bkt;
3136
3137 if (!tc_filter->action) {
3138 dev_info(&pf->pdev->dev,
3139 "VF %d: Currently ADq doesn't support Drop Action\n",
3140 vf->vf_id);
3141 goto err;
3142 }
3143
3144
3145 if (!tc_filter->action_meta ||
3146 tc_filter->action_meta > I40E_MAX_VF_VSI) {
3147 dev_info(&pf->pdev->dev, "VF %d: Invalid TC number %u\n",
3148 vf->vf_id, tc_filter->action_meta);
3149 goto err;
3150 }
3151
3152
3153
3154
3155
3156
3157
3158
3159
3160
3161 if (mask.dst_mac[0] && !mask.dst_ip[0]) {
3162 vsi = pf->vsi[vf->lan_vsi_idx];
3163 f = i40e_find_mac(vsi, data.dst_mac);
3164
3165 if (!f) {
3166 dev_info(&pf->pdev->dev,
3167 "Destination MAC %pM doesn't belong to VF %d\n",
3168 data.dst_mac, vf->vf_id);
3169 goto err;
3170 }
3171
3172 if (mask.vlan_id) {
3173 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f,
3174 hlist) {
3175 if (f->vlan == ntohs(data.vlan_id)) {
3176 found = true;
3177 break;
3178 }
3179 }
3180 if (!found) {
3181 dev_info(&pf->pdev->dev,
3182 "VF %d doesn't have any VLAN id %u\n",
3183 vf->vf_id, ntohs(data.vlan_id));
3184 goto err;
3185 }
3186 }
3187 } else {
3188
3189 if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) {
3190 dev_err(&pf->pdev->dev,
3191 "VF %d not trusted, make VF trusted to add advanced mode ADq cloud filters\n",
3192 vf->vf_id);
3193 return I40E_ERR_CONFIG;
3194 }
3195 }
3196
3197 if (mask.dst_mac[0] & data.dst_mac[0]) {
3198 if (is_broadcast_ether_addr(data.dst_mac) ||
3199 is_zero_ether_addr(data.dst_mac)) {
3200 dev_info(&pf->pdev->dev, "VF %d: Invalid Dest MAC addr %pM\n",
3201 vf->vf_id, data.dst_mac);
3202 goto err;
3203 }
3204 }
3205
3206 if (mask.src_mac[0] & data.src_mac[0]) {
3207 if (is_broadcast_ether_addr(data.src_mac) ||
3208 is_zero_ether_addr(data.src_mac)) {
3209 dev_info(&pf->pdev->dev, "VF %d: Invalid Source MAC addr %pM\n",
3210 vf->vf_id, data.src_mac);
3211 goto err;
3212 }
3213 }
3214
3215 if (mask.dst_port & data.dst_port) {
3216 if (!data.dst_port) {
3217 dev_info(&pf->pdev->dev, "VF %d: Invalid Dest port\n",
3218 vf->vf_id);
3219 goto err;
3220 }
3221 }
3222
3223 if (mask.src_port & data.src_port) {
3224 if (!data.src_port) {
3225 dev_info(&pf->pdev->dev, "VF %d: Invalid Source port\n",
3226 vf->vf_id);
3227 goto err;
3228 }
3229 }
3230
3231 if (tc_filter->flow_type != VIRTCHNL_TCP_V6_FLOW &&
3232 tc_filter->flow_type != VIRTCHNL_TCP_V4_FLOW) {
3233 dev_info(&pf->pdev->dev, "VF %d: Invalid Flow type\n",
3234 vf->vf_id);
3235 goto err;
3236 }
3237
3238 if (mask.vlan_id & data.vlan_id) {
3239 if (ntohs(data.vlan_id) > I40E_MAX_VLANID) {
3240 dev_info(&pf->pdev->dev, "VF %d: invalid VLAN ID\n",
3241 vf->vf_id);
3242 goto err;
3243 }
3244 }
3245
3246 return I40E_SUCCESS;
3247 err:
3248 return I40E_ERR_CONFIG;
3249 }
3250
3251
3252
3253
3254
3255
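/**
 * i40e_find_vsi_from_seid - look up one of the VF's channel VSIs by SEID
 * @vf: pointer to the VF info
 * @seid: switch element ID to search for
 *
 * Returns the matching VSI, or NULL if none of the VF's TCs uses it.
 */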
3256 static struct i40e_vsi *i40e_find_vsi_from_seid(struct i40e_vf *vf, u16 seid)
3257 {
3258 struct i40e_pf *pf = vf->pf;
3259 struct i40e_vsi *vsi = NULL;
3260 int i;
3261
3262 for (i = 0; i < vf->num_tc; i++) {
3263 vsi = i40e_find_vsi_from_id(pf, vf->ch[i].vsi_id);
3264 if (vsi && vsi->seid == seid)
3265 return vsi;
3266 }
3267 return NULL;
3268 }
3269
3270
3271
3272
3273
3274
3275
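/**
 * i40e_del_all_cloud_filters - remove all cloud filters owned by the VF
 * @vf: pointer to the VF info
 *
 * Deletes each filter from the hardware where possible and frees the
 * corresponding entries on the VF's cloud filter list.
 */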
3276 static void i40e_del_all_cloud_filters(struct i40e_vf *vf)
3277 {
3278 struct i40e_cloud_filter *cfilter = NULL;
3279 struct i40e_pf *pf = vf->pf;
3280 struct i40e_vsi *vsi = NULL;
3281 struct hlist_node *node;
3282 int ret;
3283
3284 hlist_for_each_entry_safe(cfilter, node,
3285 &vf->cloud_filter_list, cloud_node) {
3286 vsi = i40e_find_vsi_from_seid(vf, cfilter->seid);
3287
3288 if (!vsi) {
3289 dev_err(&pf->pdev->dev, "VF %d: no VSI found for matching %u seid, can't delete cloud filter\n",
3290 vf->vf_id, cfilter->seid);
3291 continue;
3292 }
3293
3294 if (cfilter->dst_port)
3295 ret = i40e_add_del_cloud_filter_big_buf(vsi, cfilter,
3296 false);
3297 else
3298 ret = i40e_add_del_cloud_filter(vsi, cfilter, false);
3299 if (ret)
3300 dev_err(&pf->pdev->dev,
3301 "VF %d: Failed to delete cloud filter, err %s aq_err %s\n",
3302 vf->vf_id, i40e_stat_str(&pf->hw, ret),
3303 i40e_aq_str(&pf->hw,
3304 pf->hw.aq.asq_last_status));
3305
3306 hlist_del(&cfilter->cloud_node);
3307 kfree(cfilter);
3308 vf->num_cloud_filters--;
3309 }
3310 }
3311
3312
3313
3314
3315
3316
3317
3318
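/**
 * i40e_vc_del_cloud_filter - delete a cloud filter on behalf of the VF
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * Validates the request, removes the filter from hardware and drops the
 * matching entry from the VF's cloud filter list, then replies to the VF.
 */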
3319 static int i40e_vc_del_cloud_filter(struct i40e_vf *vf, u8 *msg)
3320 {
3321 struct virtchnl_filter *vcf = (struct virtchnl_filter *)msg;
3322 struct virtchnl_l4_spec mask = vcf->mask.tcp_spec;
3323 struct virtchnl_l4_spec tcf = vcf->data.tcp_spec;
3324 struct i40e_cloud_filter cfilter, *cf = NULL;
3325 struct i40e_pf *pf = vf->pf;
3326 struct i40e_vsi *vsi = NULL;
3327 struct hlist_node *node;
3328 i40e_status aq_ret = 0;
3329 int i, ret;
3330
3331 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
3332 aq_ret = I40E_ERR_PARAM;
3333 goto err;
3334 }
3335
3336 if (!vf->adq_enabled) {
3337 dev_info(&pf->pdev->dev,
3338 "VF %d: ADq not enabled, can't apply cloud filter\n",
3339 vf->vf_id);
3340 aq_ret = I40E_ERR_PARAM;
3341 goto err;
3342 }
3343
3344 if (i40e_validate_cloud_filter(vf, vcf)) {
3345 dev_info(&pf->pdev->dev,
3346 "VF %d: Invalid input, can't apply cloud filter\n",
3347 vf->vf_id);
3348 aq_ret = I40E_ERR_PARAM;
3349 goto err;
3350 }
3351
3352 memset(&cfilter, 0, sizeof(cfilter));
3353
3354 for (i = 0; i < ETH_ALEN; i++)
3355 cfilter.dst_mac[i] = mask.dst_mac[i] & tcf.dst_mac[i];
3356
3357
3358 for (i = 0; i < ETH_ALEN; i++)
3359 cfilter.src_mac[i] = mask.src_mac[i] & tcf.src_mac[i];
3360
3361 cfilter.vlan_id = mask.vlan_id & tcf.vlan_id;
3362 cfilter.dst_port = mask.dst_port & tcf.dst_port;
3363 cfilter.src_port = mask.src_port & tcf.src_port;
3364
3365 switch (vcf->flow_type) {
3366 case VIRTCHNL_TCP_V4_FLOW:
3367 cfilter.n_proto = ETH_P_IP;
3368 if (mask.dst_ip[0] & tcf.dst_ip[0])
3369 memcpy(&cfilter.ip.v4.dst_ip, tcf.dst_ip,
3370 ARRAY_SIZE(tcf.dst_ip));
3371 else if (mask.src_ip[0] & tcf.dst_ip[0])
3372 memcpy(&cfilter.ip.v4.src_ip, tcf.src_ip,
3373 ARRAY_SIZE(tcf.dst_ip));
3374 break;
3375 case VIRTCHNL_TCP_V6_FLOW:
3376 cfilter.n_proto = ETH_P_IPV6;
3377 if (mask.dst_ip[3] & tcf.dst_ip[3])
3378 memcpy(&cfilter.ip.v6.dst_ip6, tcf.dst_ip,
3379 sizeof(cfilter.ip.v6.dst_ip6));
3380 if (mask.src_ip[3] & tcf.src_ip[3])
3381 memcpy(&cfilter.ip.v6.src_ip6, tcf.src_ip,
3382 sizeof(cfilter.ip.v6.src_ip6));
3383 break;
3384 default:
3385
3386
3387
3388 dev_info(&pf->pdev->dev, "VF %d: Flow type not configured\n",
3389 vf->vf_id);
3390 }
3391
3392
3393 vsi = pf->vsi[vf->ch[vcf->action_meta].vsi_idx];
3394 cfilter.seid = vsi->seid;
3395 cfilter.flags = vcf->field_flags;
3396
3397
3398 if (tcf.dst_port)
3399 ret = i40e_add_del_cloud_filter_big_buf(vsi, &cfilter, false);
3400 else
3401 ret = i40e_add_del_cloud_filter(vsi, &cfilter, false);
3402 if (ret) {
3403 dev_err(&pf->pdev->dev,
3404 "VF %d: Failed to delete cloud filter, err %s aq_err %s\n",
3405 vf->vf_id, i40e_stat_str(&pf->hw, ret),
3406 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
3407 goto err;
3408 }
3409
3410 hlist_for_each_entry_safe(cf, node,
3411 &vf->cloud_filter_list, cloud_node) {
3412 if (cf->seid != cfilter.seid)
3413 continue;
3414 if (mask.dst_port)
3415 if (cfilter.dst_port != cf->dst_port)
3416 continue;
3417 if (mask.dst_mac[0])
3418 if (!ether_addr_equal(cf->src_mac, cfilter.src_mac))
3419 continue;
3420
3421 if (cfilter.n_proto == ETH_P_IP && mask.dst_ip[0])
3422 if (memcmp(&cfilter.ip.v4.dst_ip, &cf->ip.v4.dst_ip,
3423 ARRAY_SIZE(tcf.dst_ip)))
3424 continue;
3425
3426 if (cfilter.n_proto == ETH_P_IPV6 && mask.dst_ip[3])
3427 if (memcmp(&cfilter.ip.v6.dst_ip6, &cf->ip.v6.dst_ip6,
3428 sizeof(cfilter.ip.v6.src_ip6)))
3429 continue;
3430 if (mask.vlan_id)
3431 if (cfilter.vlan_id != cf->vlan_id)
3432 continue;
3433
3434 hlist_del(&cf->cloud_node);
3435 kfree(cf);
3436 vf->num_cloud_filters--;
3437 }
3438
3439 err:
3440 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DEL_CLOUD_FILTER,
3441 aq_ret);
3442 }
3443
3444
3445
3446
3447
3448
3449
3450
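/**
 * i40e_vc_add_cloud_filter - add an ADq cloud filter on behalf of the VF
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * Validates the request, programs the filter to hardware against the
 * selected channel VSI and tracks it on the VF's cloud filter list.
 */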
3451 static int i40e_vc_add_cloud_filter(struct i40e_vf *vf, u8 *msg)
3452 {
3453 struct virtchnl_filter *vcf = (struct virtchnl_filter *)msg;
3454 struct virtchnl_l4_spec mask = vcf->mask.tcp_spec;
3455 struct virtchnl_l4_spec tcf = vcf->data.tcp_spec;
3456 struct i40e_cloud_filter *cfilter = NULL;
3457 struct i40e_pf *pf = vf->pf;
3458 struct i40e_vsi *vsi = NULL;
3459 i40e_status aq_ret = 0;
3460 int i, ret;
3461
3462 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
3463 aq_ret = I40E_ERR_PARAM;
3464 goto err_out;
3465 }
3466
3467 if (!vf->adq_enabled) {
3468 dev_info(&pf->pdev->dev,
3469 "VF %d: ADq is not enabled, can't apply cloud filter\n",
3470 vf->vf_id);
3471 aq_ret = I40E_ERR_PARAM;
3472 goto err_out;
3473 }
3474
3475 if (i40e_validate_cloud_filter(vf, vcf)) {
3476 dev_info(&pf->pdev->dev,
3477 "VF %d: Invalid input/s, can't apply cloud filter\n",
3478 vf->vf_id);
3479 aq_ret = I40E_ERR_PARAM;
3480 goto err_out;
3481 }
3482
3483 cfilter = kzalloc(sizeof(*cfilter), GFP_KERNEL);
3484 if (!cfilter)
3485 return -ENOMEM;
3486
3487
3488 for (i = 0; i < ETH_ALEN; i++)
3489 cfilter->dst_mac[i] = mask.dst_mac[i] & tcf.dst_mac[i];
3490
3491
3492 for (i = 0; i < ETH_ALEN; i++)
3493 cfilter->src_mac[i] = mask.src_mac[i] & tcf.src_mac[i];
3494
3495 cfilter->vlan_id = mask.vlan_id & tcf.vlan_id;
3496 cfilter->dst_port = mask.dst_port & tcf.dst_port;
3497 cfilter->src_port = mask.src_port & tcf.src_port;
3498
3499 switch (vcf->flow_type) {
3500 case VIRTCHNL_TCP_V4_FLOW:
3501 cfilter->n_proto = ETH_P_IP;
3502 if (mask.dst_ip[0] & tcf.dst_ip[0])
3503 memcpy(&cfilter->ip.v4.dst_ip, tcf.dst_ip,
3504 ARRAY_SIZE(tcf.dst_ip));
3505 else if (mask.src_ip[0] & tcf.dst_ip[0])
3506 memcpy(&cfilter->ip.v4.src_ip, tcf.src_ip,
3507 ARRAY_SIZE(tcf.dst_ip));
3508 break;
3509 case VIRTCHNL_TCP_V6_FLOW:
3510 cfilter->n_proto = ETH_P_IPV6;
3511 if (mask.dst_ip[3] & tcf.dst_ip[3])
3512 memcpy(&cfilter->ip.v6.dst_ip6, tcf.dst_ip,
3513 sizeof(cfilter->ip.v6.dst_ip6));
3514 if (mask.src_ip[3] & tcf.src_ip[3])
3515 memcpy(&cfilter->ip.v6.src_ip6, tcf.src_ip,
3516 sizeof(cfilter->ip.v6.src_ip6));
3517 break;
3518 default:
3519
3520
3521
3522 dev_info(&pf->pdev->dev, "VF %d: Flow type not configured\n",
3523 vf->vf_id);
3524 }
3525
3526
3527 vsi = pf->vsi[vf->ch[vcf->action_meta].vsi_idx];
3528 cfilter->seid = vsi->seid;
3529 cfilter->flags = vcf->field_flags;
3530
3531
3532 if (tcf.dst_port)
3533 ret = i40e_add_del_cloud_filter_big_buf(vsi, cfilter, true);
3534 else
3535 ret = i40e_add_del_cloud_filter(vsi, cfilter, true);
3536 if (ret) {
3537 dev_err(&pf->pdev->dev,
3538 "VF %d: Failed to add cloud filter, err %s aq_err %s\n",
3539 vf->vf_id, i40e_stat_str(&pf->hw, ret),
3540 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
3541 goto err_free;
3542 }
3543
3544 INIT_HLIST_NODE(&cfilter->cloud_node);
3545 hlist_add_head(&cfilter->cloud_node, &vf->cloud_filter_list);
3546
3547 cfilter = NULL;
3548 vf->num_cloud_filters++;
3549 err_free:
3550 kfree(cfilter);
3551 err_out:
3552 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ADD_CLOUD_FILTER,
3553 aq_ret);
3554 }
3555
3556
3557
3558
3559
3560
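/**
 * i40e_vc_add_qch_msg - enable ADq queue channels for the VF
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * Validates the requested traffic classes, per-TC queue counts and Tx
 * rate limits against the link speed, records the channel configuration
 * and resets the VF so the new queue layout takes effect.
 */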
3561 static int i40e_vc_add_qch_msg(struct i40e_vf *vf, u8 *msg)
3562 {
3563 struct virtchnl_tc_info *tci =
3564 (struct virtchnl_tc_info *)msg;
3565 struct i40e_pf *pf = vf->pf;
3566 struct i40e_link_status *ls = &pf->hw.phy.link_info;
3567 int i, adq_request_qps = 0;
3568 i40e_status aq_ret = 0;
3569 u64 speed = 0;
3570
3571 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
3572 aq_ret = I40E_ERR_PARAM;
3573 goto err;
3574 }
3575
3576
3577 if (vf->spoofchk) {
3578 dev_err(&pf->pdev->dev,
3579 "Spoof check is ON, turn it OFF to enable ADq\n");
3580 aq_ret = I40E_ERR_PARAM;
3581 goto err;
3582 }
3583
3584 if (!(vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ADQ)) {
3585 dev_err(&pf->pdev->dev,
3586 "VF %d attempting to enable ADq, but hasn't properly negotiated that capability\n",
3587 vf->vf_id);
3588 aq_ret = I40E_ERR_PARAM;
3589 goto err;
3590 }
3591
3592
3593 if (!tci->num_tc || tci->num_tc > I40E_MAX_VF_VSI) {
3594 dev_err(&pf->pdev->dev,
3595 "VF %d trying to set %u TCs, valid range 1-%u TCs per VF\n",
3596 vf->vf_id, tci->num_tc, I40E_MAX_VF_VSI);
3597 aq_ret = I40E_ERR_PARAM;
3598 goto err;
3599 }
3600
3601
3602 for (i = 0; i < tci->num_tc; i++)
3603 if (!tci->list[i].count ||
3604 tci->list[i].count > I40E_DEFAULT_QUEUES_PER_VF) {
3605 dev_err(&pf->pdev->dev,
3606 "VF %d: TC %d trying to set %u queues, valid range 1-%u queues per TC\n",
3607 vf->vf_id, i, tci->list[i].count,
3608 I40E_DEFAULT_QUEUES_PER_VF);
3609 aq_ret = I40E_ERR_PARAM;
3610 goto err;
3611 }
3612
3613
3614 adq_request_qps = I40E_MAX_VF_QUEUES - I40E_DEFAULT_QUEUES_PER_VF;
3615
3616 if (pf->queues_left < adq_request_qps) {
3617 dev_err(&pf->pdev->dev,
3618 "No queues left to allocate to VF %d\n",
3619 vf->vf_id);
3620 aq_ret = I40E_ERR_PARAM;
3621 goto err;
3622 } else {
3623
3624
3625
3626
3627 vf->num_queue_pairs = I40E_MAX_VF_QUEUES;
3628 }
3629
3630
3631 switch (ls->link_speed) {
3632 case VIRTCHNL_LINK_SPEED_100MB:
3633 speed = SPEED_100;
3634 break;
3635 case VIRTCHNL_LINK_SPEED_1GB:
3636 speed = SPEED_1000;
3637 break;
3638 case VIRTCHNL_LINK_SPEED_10GB:
3639 speed = SPEED_10000;
3640 break;
3641 case VIRTCHNL_LINK_SPEED_20GB:
3642 speed = SPEED_20000;
3643 break;
3644 case VIRTCHNL_LINK_SPEED_25GB:
3645 speed = SPEED_25000;
3646 break;
3647 case VIRTCHNL_LINK_SPEED_40GB:
3648 speed = SPEED_40000;
3649 break;
3650 default:
3651 dev_err(&pf->pdev->dev,
3652 "Cannot detect link speed\n");
3653 aq_ret = I40E_ERR_PARAM;
3654 goto err;
3655 }
3656
3657
3658 vf->num_tc = tci->num_tc;
3659 for (i = 0; i < vf->num_tc; i++) {
3660 if (tci->list[i].max_tx_rate) {
3661 if (tci->list[i].max_tx_rate > speed) {
3662 dev_err(&pf->pdev->dev,
3663 "Invalid max tx rate %llu specified for VF %d.",
3664 tci->list[i].max_tx_rate,
3665 vf->vf_id);
3666 aq_ret = I40E_ERR_PARAM;
3667 goto err;
3668 } else {
3669 vf->ch[i].max_tx_rate =
3670 tci->list[i].max_tx_rate;
3671 }
3672 }
3673 vf->ch[i].num_qps = tci->list[i].count;
3674 }
3675
3676
3677 vf->adq_enabled = true;
3678
3679
3680
3681
3682 vf->num_req_queues = 0;
3683
3684
3685 i40e_vc_notify_vf_reset(vf);
3686 i40e_reset_vf(vf, false);
3687
3688 return I40E_SUCCESS;
3689
3690
3691 err:
3692 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ENABLE_CHANNELS,
3693 aq_ret);
3694 }
3695
3696
3697
3698
3699
3700
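/**
 * i40e_vc_del_qch_msg - remove the VF's ADq queue channels
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * Deletes the VF's cloud filters and queue channels, then resets the VF.
 */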
3701 static int i40e_vc_del_qch_msg(struct i40e_vf *vf, u8 *msg)
3702 {
3703 struct i40e_pf *pf = vf->pf;
3704 i40e_status aq_ret = 0;
3705
3706 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
3707 aq_ret = I40E_ERR_PARAM;
3708 goto err;
3709 }
3710
3711 if (vf->adq_enabled) {
3712 i40e_del_all_cloud_filters(vf);
3713 i40e_del_qch(vf);
3714 vf->adq_enabled = false;
3715 vf->num_tc = 0;
3716 dev_info(&pf->pdev->dev,
3717 "Deleting Queue Channels and cloud filters for ADq on VF %d\n",
3718 vf->vf_id);
3719 } else {
3720 dev_info(&pf->pdev->dev, "VF %d trying to delete queue channels but ADq isn't enabled\n",
3721 vf->vf_id);
3722 aq_ret = I40E_ERR_PARAM;
3723 }
3724
3725
3726 i40e_vc_notify_vf_reset(vf);
3727 i40e_reset_vf(vf, false);
3728
3729 return I40E_SUCCESS;
3730
3731 err:
3732 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DISABLE_CHANNELS,
3733 aq_ret);
3734 }
3735
3736
3737
3738
3739
3740
3741
3742
3743
3744
3745
3746
3747
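/**
 * i40e_vc_process_vf_msg - dispatch a message received from a VF
 * @pf: pointer to the PF structure
 * @vf_id: absolute VF id taken from the admin queue event
 * @v_opcode: virtchnl operation code
 * @v_retval: unused return value field
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * Validates the message against the negotiated virtchnl version and
 * calls the handler for the requested opcode.
 */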
3748 int i40e_vc_process_vf_msg(struct i40e_pf *pf, s16 vf_id, u32 v_opcode,
3749 u32 __always_unused v_retval, u8 *msg, u16 msglen)
3750 {
3751 struct i40e_hw *hw = &pf->hw;
3752 int local_vf_id = vf_id - (s16)hw->func_caps.vf_base_id;
3753 struct i40e_vf *vf;
3754 int ret;
3755
3756 pf->vf_aq_requests++;
3757 if (local_vf_id < 0 || local_vf_id >= pf->num_alloc_vfs)
3758 return -EINVAL;
3759 vf = &(pf->vf[local_vf_id]);
3760
3761
3762 if (test_bit(I40E_VF_STATE_DISABLED, &vf->vf_states))
3763 return I40E_ERR_PARAM;
3764
3765
3766 ret = virtchnl_vc_validate_vf_msg(&vf->vf_ver, v_opcode, msg, msglen);
3767
3768 if (ret) {
3769 i40e_vc_send_resp_to_vf(vf, v_opcode, I40E_ERR_PARAM);
3770 dev_err(&pf->pdev->dev, "Invalid message from VF %d, opcode %d, len %d\n",
3771 local_vf_id, v_opcode, msglen);
3772 switch (ret) {
3773 case VIRTCHNL_STATUS_ERR_PARAM:
3774 return -EPERM;
3775 default:
3776 return -EINVAL;
3777 }
3778 }
3779
3780 switch (v_opcode) {
3781 case VIRTCHNL_OP_VERSION:
3782 ret = i40e_vc_get_version_msg(vf, msg);
3783 break;
3784 case VIRTCHNL_OP_GET_VF_RESOURCES:
3785 ret = i40e_vc_get_vf_resources_msg(vf, msg);
3786 i40e_vc_notify_vf_link_state(vf);
3787 break;
3788 case VIRTCHNL_OP_RESET_VF:
3789 i40e_vc_reset_vf_msg(vf);
3790 ret = 0;
3791 break;
3792 case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
3793 ret = i40e_vc_config_promiscuous_mode_msg(vf, msg);
3794 break;
3795 case VIRTCHNL_OP_CONFIG_VSI_QUEUES:
3796 ret = i40e_vc_config_queues_msg(vf, msg);
3797 break;
3798 case VIRTCHNL_OP_CONFIG_IRQ_MAP:
3799 ret = i40e_vc_config_irq_map_msg(vf, msg);
3800 break;
3801 case VIRTCHNL_OP_ENABLE_QUEUES:
3802 ret = i40e_vc_enable_queues_msg(vf, msg);
3803 i40e_vc_notify_vf_link_state(vf);
3804 break;
3805 case VIRTCHNL_OP_DISABLE_QUEUES:
3806 ret = i40e_vc_disable_queues_msg(vf, msg);
3807 break;
3808 case VIRTCHNL_OP_ADD_ETH_ADDR:
3809 ret = i40e_vc_add_mac_addr_msg(vf, msg);
3810 break;
3811 case VIRTCHNL_OP_DEL_ETH_ADDR:
3812 ret = i40e_vc_del_mac_addr_msg(vf, msg);
3813 break;
3814 case VIRTCHNL_OP_ADD_VLAN:
3815 ret = i40e_vc_add_vlan_msg(vf, msg);
3816 break;
3817 case VIRTCHNL_OP_DEL_VLAN:
3818 ret = i40e_vc_remove_vlan_msg(vf, msg);
3819 break;
3820 case VIRTCHNL_OP_GET_STATS:
3821 ret = i40e_vc_get_stats_msg(vf, msg);
3822 break;
3823 case VIRTCHNL_OP_IWARP:
3824 ret = i40e_vc_iwarp_msg(vf, msg, msglen);
3825 break;
3826 case VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP:
3827 ret = i40e_vc_iwarp_qvmap_msg(vf, msg, true);
3828 break;
3829 case VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP:
3830 ret = i40e_vc_iwarp_qvmap_msg(vf, msg, false);
3831 break;
3832 case VIRTCHNL_OP_CONFIG_RSS_KEY:
3833 ret = i40e_vc_config_rss_key(vf, msg);
3834 break;
3835 case VIRTCHNL_OP_CONFIG_RSS_LUT:
3836 ret = i40e_vc_config_rss_lut(vf, msg);
3837 break;
3838 case VIRTCHNL_OP_GET_RSS_HENA_CAPS:
3839 ret = i40e_vc_get_rss_hena(vf, msg);
3840 break;
3841 case VIRTCHNL_OP_SET_RSS_HENA:
3842 ret = i40e_vc_set_rss_hena(vf, msg);
3843 break;
3844 case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING:
3845 ret = i40e_vc_enable_vlan_stripping(vf, msg);
3846 break;
3847 case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING:
3848 ret = i40e_vc_disable_vlan_stripping(vf, msg);
3849 break;
3850 case VIRTCHNL_OP_REQUEST_QUEUES:
3851 ret = i40e_vc_request_queues_msg(vf, msg);
3852 break;
3853 case VIRTCHNL_OP_ENABLE_CHANNELS:
3854 ret = i40e_vc_add_qch_msg(vf, msg);
3855 break;
3856 case VIRTCHNL_OP_DISABLE_CHANNELS:
3857 ret = i40e_vc_del_qch_msg(vf, msg);
3858 break;
3859 case VIRTCHNL_OP_ADD_CLOUD_FILTER:
3860 ret = i40e_vc_add_cloud_filter(vf, msg);
3861 break;
3862 case VIRTCHNL_OP_DEL_CLOUD_FILTER:
3863 ret = i40e_vc_del_cloud_filter(vf, msg);
3864 break;
3865 case VIRTCHNL_OP_UNKNOWN:
3866 default:
3867 dev_err(&pf->pdev->dev, "Unsupported opcode %d from VF %d\n",
3868 v_opcode, local_vf_id);
3869 ret = i40e_vc_send_resp_to_vf(vf, v_opcode,
3870 I40E_ERR_NOT_IMPLEMENTED);
3871 break;
3872 }
3873
3874 return ret;
3875 }
3876
3877
3878
3879
3880
3881
3882
3883
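/**
 * i40e_vc_process_vflr_event - handle pending VF-level reset (VFLR) events
 * @pf: pointer to the PF structure
 *
 * Re-enables the VFLR interrupt cause, then scans the VFLR status
 * registers and resets every VF whose reset bit is set.
 */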
3884 int i40e_vc_process_vflr_event(struct i40e_pf *pf)
3885 {
3886 struct i40e_hw *hw = &pf->hw;
3887 u32 reg, reg_idx, bit_idx;
3888 struct i40e_vf *vf;
3889 int vf_id;
3890
3891 if (!test_bit(__I40E_VFLR_EVENT_PENDING, pf->state))
3892 return 0;
3893
3894
3895
3896
3897
3898
3899 reg = rd32(hw, I40E_PFINT_ICR0_ENA);
3900 reg |= I40E_PFINT_ICR0_ENA_VFLR_MASK;
3901 wr32(hw, I40E_PFINT_ICR0_ENA, reg);
3902 i40e_flush(hw);
3903
3904 clear_bit(__I40E_VFLR_EVENT_PENDING, pf->state);
3905 for (vf_id = 0; vf_id < pf->num_alloc_vfs; vf_id++) {
3906 reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
3907 bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
3908
3909 vf = &pf->vf[vf_id];
3910 reg = rd32(hw, I40E_GLGEN_VFLRSTAT(reg_idx));
3911 if (reg & BIT(bit_idx))
3912
3913 i40e_reset_vf(vf, true);
3914 }
3915
3916 return 0;
3917 }
3918
3919
3920
3921
3922
3923
3924
3925
3926
3927
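/**
 * i40e_validate_vf - validate a VF id passed in from an ndo callback
 * @pf: pointer to the PF structure
 * @vf_id: VF identifier
 *
 * Returns 0 if the id is in range and the VF has a LAN VSI, -EINVAL
 * otherwise.
 */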
3928 static int i40e_validate_vf(struct i40e_pf *pf, int vf_id)
3929 {
3930 struct i40e_vsi *vsi;
3931 struct i40e_vf *vf;
3932 int ret = 0;
3933
3934 if (vf_id >= pf->num_alloc_vfs) {
3935 dev_err(&pf->pdev->dev,
3936 "Invalid VF Identifier %d\n", vf_id);
3937 ret = -EINVAL;
3938 goto err_out;
3939 }
3940 vf = &pf->vf[vf_id];
3941 vsi = i40e_find_vsi_from_id(pf, vf->lan_vsi_id);
3942 if (!vsi)
3943 ret = -EINVAL;
3944 err_out:
3945 return ret;
3946 }
3947
3948
3949
3950
3951
3952
3953
3954
3955
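/**
 * i40e_ndo_set_vf_mac - administratively set a VF's MAC address
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @mac: MAC address to program
 *
 * Removes the VF's existing MAC filters, records the new address as the
 * VF's default LAN address and disables the VF so it reinitializes with
 * the new address. A zero address clears the administratively set MAC.
 */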
3956 int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
3957 {
3958 struct i40e_netdev_priv *np = netdev_priv(netdev);
3959 struct i40e_vsi *vsi = np->vsi;
3960 struct i40e_pf *pf = vsi->back;
3961 struct i40e_mac_filter *f;
3962 struct i40e_vf *vf;
3963 int ret = 0;
3964 struct hlist_node *h;
3965 int bkt;
3966 u8 i;
3967
3968 if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
3969 dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
3970 return -EAGAIN;
3971 }
3972
3973
3974 ret = i40e_validate_vf(pf, vf_id);
3975 if (ret)
3976 goto error_param;
3977
3978 vf = &pf->vf[vf_id];
3979 vsi = pf->vsi[vf->lan_vsi_idx];
3980
3981
3982
3983
3984
3985
3986
3987 for (i = 0; i < 15; i++) {
3988 if (test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
3989 if (i > 0)
3990 vsi = pf->vsi[vf->lan_vsi_idx];
3991 break;
3992 }
3993 msleep(20);
3994 }
3995 if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
3996 dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n",
3997 vf_id);
3998 ret = -EAGAIN;
3999 goto error_param;
4000 }
4001
4002 if (is_multicast_ether_addr(mac)) {
4003 dev_err(&pf->pdev->dev,
4004 "Invalid Ethernet address %pM for VF %d\n", mac, vf_id);
4005 ret = -EINVAL;
4006 goto error_param;
4007 }
4008
4009
4010
4011
4012 spin_lock_bh(&vsi->mac_filter_hash_lock);
4013
4014
4015 if (!is_zero_ether_addr(vf->default_lan_addr.addr))
4016 i40e_del_mac_filter(vsi, vf->default_lan_addr.addr);
4017
4018
4019
4020
4021 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist)
4022 __i40e_del_filter(vsi, f);
4023
4024 spin_unlock_bh(&vsi->mac_filter_hash_lock);
4025
4026
4027 if (i40e_sync_vsi_filters(vsi)) {
4028 dev_err(&pf->pdev->dev, "Unable to program ucast filters\n");
4029 ret = -EIO;
4030 goto error_param;
4031 }
4032 ether_addr_copy(vf->default_lan_addr.addr, mac);
4033
4034 if (is_zero_ether_addr(mac)) {
4035 vf->pf_set_mac = false;
4036 dev_info(&pf->pdev->dev, "Removing MAC on VF %d\n", vf_id);
4037 } else {
4038 vf->pf_set_mac = true;
4039 dev_info(&pf->pdev->dev, "Setting MAC %pM on VF %d\n",
4040 mac, vf_id);
4041 }
4042
4043
4044
4045
4046 i40e_vc_disable_vf(vf);
4047 dev_info(&pf->pdev->dev, "Bring down and up the VF interface to make this change effective.\n");
4048
4049 error_param:
4050 clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
4051 return ret;
4052 }
4053
4054
4055
4056
4057
4058
4059
4060
4061
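/**
 * i40e_vsi_has_vlans - check whether the VSI has VLAN-tagged filters
 * @vsi: pointer to the VSI
 *
 * Returns false when a port VLAN is configured, otherwise reports
 * whether any MAC filter on the VSI is associated with a VLAN.
 */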
4062 static bool i40e_vsi_has_vlans(struct i40e_vsi *vsi)
4063 {
4064 bool have_vlans;
4065
4066
4067
4068
4069 if (vsi->info.pvid)
4070 return false;
4071
4072
4073
4074
4075 spin_lock_bh(&vsi->mac_filter_hash_lock);
4076 have_vlans = i40e_is_vsi_in_vlan(vsi);
4077 spin_unlock_bh(&vsi->mac_filter_hash_lock);
4078
4079 return have_vlans;
4080 }
4081
4082
4083
4084
4085
4086
4087
4088
4089
4090
4091
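/**
 * i40e_ndo_set_vf_port_vlan - set or clear a port VLAN for a VF
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @vlan_id: VLAN id to assign (0 together with @qos 0 removes the port VLAN)
 * @qos: priority bits for the port VLAN
 * @vlan_proto: VLAN protocol, only 802.1Q is supported
 *
 * Moves the VF's MAC filters onto the new port VLAN, updates the VSI
 * PVID and restores the VF's promiscuous settings.
 */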
4092 int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, int vf_id,
4093 u16 vlan_id, u8 qos, __be16 vlan_proto)
4094 {
4095 u16 vlanprio = vlan_id | (qos << I40E_VLAN_PRIORITY_SHIFT);
4096 struct i40e_netdev_priv *np = netdev_priv(netdev);
4097 bool allmulti = false, alluni = false;
4098 struct i40e_pf *pf = np->vsi->back;
4099 struct i40e_vsi *vsi;
4100 struct i40e_vf *vf;
4101 int ret = 0;
4102
4103 if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
4104 dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
4105 return -EAGAIN;
4106 }
4107
4108
4109 ret = i40e_validate_vf(pf, vf_id);
4110 if (ret)
4111 goto error_pvid;
4112
4113 if ((vlan_id > I40E_MAX_VLANID) || (qos > 7)) {
4114 dev_err(&pf->pdev->dev, "Invalid VF Parameters\n");
4115 ret = -EINVAL;
4116 goto error_pvid;
4117 }
4118
4119 if (vlan_proto != htons(ETH_P_8021Q)) {
4120 dev_err(&pf->pdev->dev, "VF VLAN protocol is not supported\n");
4121 ret = -EPROTONOSUPPORT;
4122 goto error_pvid;
4123 }
4124
4125 vf = &pf->vf[vf_id];
4126 vsi = pf->vsi[vf->lan_vsi_idx];
4127 if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
4128 dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n",
4129 vf_id);
4130 ret = -EAGAIN;
4131 goto error_pvid;
4132 }
4133
4134 if (le16_to_cpu(vsi->info.pvid) == vlanprio)
4135
4136 goto error_pvid;
4137
4138 if (i40e_vsi_has_vlans(vsi)) {
4139 dev_err(&pf->pdev->dev,
4140 "VF %d has already configured VLAN filters and the administrator is requesting a port VLAN override.\nPlease unload and reload the VF driver for this change to take effect.\n",
4141 vf_id);
4142
4143
4144
4145
4146 i40e_vc_disable_vf(vf);
4147
4148 vsi = pf->vsi[vf->lan_vsi_idx];
4149 }
4150
4151
4152 spin_lock_bh(&vsi->mac_filter_hash_lock);
4153
4154
4155
4156
4157
4158
4159
4160
4161
4162 if ((!(vlan_id || qos) ||
4163 vlanprio != le16_to_cpu(vsi->info.pvid)) &&
4164 vsi->info.pvid) {
4165 ret = i40e_add_vlan_all_mac(vsi, I40E_VLAN_ANY);
4166 if (ret) {
4167 dev_info(&vsi->back->pdev->dev,
4168 "add VF VLAN failed, ret=%d aq_err=%d\n", ret,
4169 vsi->back->hw.aq.asq_last_status);
4170 spin_unlock_bh(&vsi->mac_filter_hash_lock);
4171 goto error_pvid;
4172 }
4173 }
4174
4175 if (vsi->info.pvid) {
4176
4177 i40e_rm_vlan_all_mac(vsi, (le16_to_cpu(vsi->info.pvid) &
4178 VLAN_VID_MASK));
4179 }
4180
4181 spin_unlock_bh(&vsi->mac_filter_hash_lock);
4182
4183
4184 ret = i40e_config_vf_promiscuous_mode(vf, vf->lan_vsi_id,
4185 allmulti, alluni);
4186 if (ret) {
4187 dev_err(&pf->pdev->dev, "Unable to config VF promiscuous mode\n");
4188 goto error_pvid;
4189 }
4190
4191 if (vlan_id || qos)
4192 ret = i40e_vsi_add_pvid(vsi, vlanprio);
4193 else
4194 i40e_vsi_remove_pvid(vsi);
4195 spin_lock_bh(&vsi->mac_filter_hash_lock);
4196
4197 if (vlan_id) {
4198 dev_info(&pf->pdev->dev, "Setting VLAN %d, QOS 0x%x on VF %d\n",
4199 vlan_id, qos, vf_id);
4200
4201
4202 ret = i40e_add_vlan_all_mac(vsi, vlan_id);
4203 if (ret) {
4204 dev_info(&vsi->back->pdev->dev,
4205 "add VF VLAN failed, ret=%d aq_err=%d\n", ret,
4206 vsi->back->hw.aq.asq_last_status);
4207 spin_unlock_bh(&vsi->mac_filter_hash_lock);
4208 goto error_pvid;
4209 }
4210
4211
4212 i40e_rm_vlan_all_mac(vsi, I40E_VLAN_ANY);
4213 }
4214
4215 spin_unlock_bh(&vsi->mac_filter_hash_lock);
4216
4217 if (test_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states))
4218 alluni = true;
4219
4220 if (test_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states))
4221 allmulti = true;
4222
4223
4224 i40e_service_event_schedule(vsi->back);
4225
4226 if (ret) {
4227 dev_err(&pf->pdev->dev, "Unable to update VF vsi context\n");
4228 goto error_pvid;
4229 }
4230
4231
4232
4233
4234 vf->port_vlan_id = le16_to_cpu(vsi->info.pvid);
4235
4236 ret = i40e_config_vf_promiscuous_mode(vf, vsi->id, allmulti, alluni);
4237 if (ret) {
4238 dev_err(&pf->pdev->dev, "Unable to config vf promiscuous mode\n");
4239 goto error_pvid;
4240 }
4241
4242 ret = 0;
4243
4244 error_pvid:
4245 clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
4246 return ret;
4247 }
4248
4249
4250
4251
4252
4253
4254
4255
4256
4257
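/**
 * i40e_ndo_set_vf_bw - set a transmit bandwidth limit for a VF
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @min_tx_rate: minimum Tx rate; must be 0, rate floors are not supported
 * @max_tx_rate: maximum Tx rate to apply to the VF's VSI
 */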
4258 int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate,
4259 int max_tx_rate)
4260 {
4261 struct i40e_netdev_priv *np = netdev_priv(netdev);
4262 struct i40e_pf *pf = np->vsi->back;
4263 struct i40e_vsi *vsi;
4264 struct i40e_vf *vf;
4265 int ret = 0;
4266
4267 if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
4268 dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
4269 return -EAGAIN;
4270 }
4271
4272
4273 ret = i40e_validate_vf(pf, vf_id);
4274 if (ret)
4275 goto error;
4276
4277 if (min_tx_rate) {
4278 dev_err(&pf->pdev->dev, "Invalid min tx rate (%d) (greater than 0) specified for VF %d.\n",
4279 min_tx_rate, vf_id);
4280 ret = -EINVAL;
4281 goto error;
4282 }
4283
4284 vf = &pf->vf[vf_id];
4285 vsi = pf->vsi[vf->lan_vsi_idx];
4286 if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
4287 dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n",
4288 vf_id);
4289 ret = -EAGAIN;
4290 goto error;
4291 }
4292
4293 ret = i40e_set_bw_limit(vsi, vsi->seid, max_tx_rate);
4294 if (ret)
4295 goto error;
4296
4297 vf->tx_rate = max_tx_rate;
4298 error:
4299 clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
4300 return ret;
4301 }
4302
4303
4304
4305
4306
4307
4308
4309
4310
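/**
 * i40e_ndo_get_vf_config - report a VF's configuration to the stack
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @ivi: ifla_vf_info structure to fill in
 *
 * Copies the VF's MAC address, rate limit, port VLAN, link state,
 * spoof-check and trust settings into @ivi.
 */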
4311 int i40e_ndo_get_vf_config(struct net_device *netdev,
4312 int vf_id, struct ifla_vf_info *ivi)
4313 {
4314 struct i40e_netdev_priv *np = netdev_priv(netdev);
4315 struct i40e_vsi *vsi = np->vsi;
4316 struct i40e_pf *pf = vsi->back;
4317 struct i40e_vf *vf;
4318 int ret = 0;
4319
4320 if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
4321 dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
4322 return -EAGAIN;
4323 }
4324
4325
4326 ret = i40e_validate_vf(pf, vf_id);
4327 if (ret)
4328 goto error_param;
4329
4330 vf = &pf->vf[vf_id];
4331
4332 vsi = pf->vsi[vf->lan_vsi_idx];
4333 if (!vsi) {
4334 ret = -ENOENT;
4335 goto error_param;
4336 }
4337
4338 ivi->vf = vf_id;
4339
4340 ether_addr_copy(ivi->mac, vf->default_lan_addr.addr);
4341
4342 ivi->max_tx_rate = vf->tx_rate;
4343 ivi->min_tx_rate = 0;
4344 ivi->vlan = le16_to_cpu(vsi->info.pvid) & I40E_VLAN_MASK;
4345 ivi->qos = (le16_to_cpu(vsi->info.pvid) & I40E_PRIORITY_MASK) >>
4346 I40E_VLAN_PRIORITY_SHIFT;
4347 if (vf->link_forced == false)
4348 ivi->linkstate = IFLA_VF_LINK_STATE_AUTO;
4349 else if (vf->link_up == true)
4350 ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE;
4351 else
4352 ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE;
4353 ivi->spoofchk = vf->spoofchk;
4354 ivi->trusted = vf->trusted;
4355 ret = 0;
4356
4357 error_param:
4358 clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
4359 return ret;
4360 }
4361
4362
4363
4364
4365
4366
4367
4368
4369
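/**
 * i40e_ndo_set_vf_link_state - force a VF's link state
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @link: IFLA_VF_LINK_STATE_AUTO, _ENABLE or _DISABLE
 *
 * Records the forced state and notifies the VF of the resulting link
 * status over virtchnl.
 */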
4370 int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link)
4371 {
4372 struct i40e_netdev_priv *np = netdev_priv(netdev);
4373 struct i40e_pf *pf = np->vsi->back;
4374 struct virtchnl_pf_event pfe;
4375 struct i40e_hw *hw = &pf->hw;
4376 struct i40e_vf *vf;
4377 int abs_vf_id;
4378 int ret = 0;
4379
4380 if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
4381 dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
4382 return -EAGAIN;
4383 }
4384
4385
4386 if (vf_id >= pf->num_alloc_vfs) {
4387 dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
4388 ret = -EINVAL;
4389 goto error_out;
4390 }
4391
4392 vf = &pf->vf[vf_id];
4393 abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
4394
4395 pfe.event = VIRTCHNL_EVENT_LINK_CHANGE;
4396 pfe.severity = PF_EVENT_SEVERITY_INFO;
4397
4398 switch (link) {
4399 case IFLA_VF_LINK_STATE_AUTO:
4400 vf->link_forced = false;
4401 pfe.event_data.link_event.link_status =
4402 pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP;
4403 pfe.event_data.link_event.link_speed =
4404 (enum virtchnl_link_speed)
4405 pf->hw.phy.link_info.link_speed;
4406 break;
4407 case IFLA_VF_LINK_STATE_ENABLE:
4408 vf->link_forced = true;
4409 vf->link_up = true;
4410 pfe.event_data.link_event.link_status = true;
4411 pfe.event_data.link_event.link_speed = VIRTCHNL_LINK_SPEED_40GB;
4412 break;
4413 case IFLA_VF_LINK_STATE_DISABLE:
4414 vf->link_forced = true;
4415 vf->link_up = false;
4416 pfe.event_data.link_event.link_status = false;
4417 pfe.event_data.link_event.link_speed = 0;
4418 break;
4419 default:
4420 ret = -EINVAL;
4421 goto error_out;
4422 }
4423
4424 i40e_aq_send_msg_to_vf(hw, abs_vf_id, VIRTCHNL_OP_EVENT,
4425 0, (u8 *)&pfe, sizeof(pfe), NULL);
4426
4427 error_out:
4428 clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
4429 return ret;
4430 }
4431
4432
4433
4434
4435
4436
4437
4438
4439
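/**
 * i40e_ndo_set_vf_spoofchk - enable or disable anti-spoofing for a VF
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @enable: true to enable MAC/VLAN spoof checking
 *
 * Updates the security flags of the VF's VSI through the admin queue.
 */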
4440 int i40e_ndo_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool enable)
4441 {
4442 struct i40e_netdev_priv *np = netdev_priv(netdev);
4443 struct i40e_vsi *vsi = np->vsi;
4444 struct i40e_pf *pf = vsi->back;
4445 struct i40e_vsi_context ctxt;
4446 struct i40e_hw *hw = &pf->hw;
4447 struct i40e_vf *vf;
4448 int ret = 0;
4449
4450 if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
4451 dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
4452 return -EAGAIN;
4453 }
4454
4455
4456 if (vf_id >= pf->num_alloc_vfs) {
4457 dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
4458 ret = -EINVAL;
4459 goto out;
4460 }
4461
4462 vf = &(pf->vf[vf_id]);
4463 if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
4464 dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n",
4465 vf_id);
4466 ret = -EAGAIN;
4467 goto out;
4468 }
4469
4470 if (enable == vf->spoofchk)
4471 goto out;
4472
4473 vf->spoofchk = enable;
4474 memset(&ctxt, 0, sizeof(ctxt));
4475 ctxt.seid = pf->vsi[vf->lan_vsi_idx]->seid;
4476 ctxt.pf_num = pf->hw.pf_id;
4477 ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID);
4478 if (enable)
4479 ctxt.info.sec_flags |= (I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK |
4480 I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK);
4481 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
4482 if (ret) {
4483 dev_err(&pf->pdev->dev, "Error %d updating VSI parameters\n",
4484 ret);
4485 ret = -EIO;
4486 }
4487 out:
4488 clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
4489 return ret;
4490 }
4491
4492
4493
4494
4495
4496
4497
4498
4499
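/**
 * i40e_ndo_set_vf_trust - mark a VF as trusted or untrusted
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @setting: trust setting
 *
 * Not supported in MFP mode. The VF is reset so the new privilege level
 * takes effect, and its cloud filters are removed when trust is revoked
 * from an ADq-enabled VF.
 */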
4500 int i40e_ndo_set_vf_trust(struct net_device *netdev, int vf_id, bool setting)
4501 {
4502 struct i40e_netdev_priv *np = netdev_priv(netdev);
4503 struct i40e_pf *pf = np->vsi->back;
4504 struct i40e_vf *vf;
4505 int ret = 0;
4506
4507 if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
4508 dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
4509 return -EAGAIN;
4510 }
4511
4512
4513 if (vf_id >= pf->num_alloc_vfs) {
4514 dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
4515 ret = -EINVAL;
4516 goto out;
4517 }
4518
4519 if (pf->flags & I40E_FLAG_MFP_ENABLED) {
4520 dev_err(&pf->pdev->dev, "Trusted VF not supported in MFP mode.\n");
4521 ret = -EINVAL;
4522 goto out;
4523 }
4524
4525 vf = &pf->vf[vf_id];
4526
4527 if (setting == vf->trusted)
4528 goto out;
4529
4530 vf->trusted = setting;
4531 i40e_vc_disable_vf(vf);
4532 dev_info(&pf->pdev->dev, "VF %u is now %strusted\n",
4533 vf_id, setting ? "" : "un");
4534
4535 if (vf->adq_enabled) {
4536 if (!vf->trusted) {
4537 dev_info(&pf->pdev->dev,
4538 "VF %u no longer Trusted, deleting all cloud filters\n",
4539 vf_id);
4540 i40e_del_all_cloud_filters(vf);
4541 }
4542 }
4543
4544 out:
4545 clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
4546 return ret;
4547 }