This source file includes the following definitions:
- bnxt_hwrm_fwd_async_event_cmpl
- bnxt_vf_ndo_prep
- bnxt_set_vf_spoofchk
- bnxt_hwrm_func_qcfg_flags
- bnxt_is_trusted_vf
- bnxt_hwrm_set_trusted_vf
- bnxt_set_vf_trust
- bnxt_get_vf_config
- bnxt_set_vf_mac
- bnxt_set_vf_vlan
- bnxt_set_vf_bw
- bnxt_set_vf_link_state
- bnxt_set_vf_attr
- bnxt_hwrm_func_vf_resource_free
- bnxt_free_vf_resources
- bnxt_alloc_vf_resources
- bnxt_hwrm_func_buf_rgtr
- __bnxt_set_vf_params
- bnxt_hwrm_func_vf_resc_cfg
- bnxt_hwrm_func_cfg
- bnxt_func_cfg
- bnxt_cfg_hw_sriov
- bnxt_sriov_enable
- bnxt_sriov_disable
- bnxt_sriov_configure
- bnxt_hwrm_fwd_resp
- bnxt_hwrm_fwd_err_resp
- bnxt_hwrm_exec_fwd_resp
- bnxt_vf_configure_mac
- bnxt_vf_validate_set_mac
- bnxt_vf_set_link
- bnxt_vf_req_validate_snd
- bnxt_hwrm_exec_fwd_req
- bnxt_update_vf_mac
- bnxt_approve_mac
- bnxt_cfg_hw_sriov (stub when CONFIG_BNXT_SRIOV is not set)
- bnxt_sriov_disable (stub when CONFIG_BNXT_SRIOV is not set)
- bnxt_hwrm_exec_fwd_req (stub when CONFIG_BNXT_SRIOV is not set)
- bnxt_update_vf_mac (stub when CONFIG_BNXT_SRIOV is not set)
- bnxt_approve_mac (stub when CONFIG_BNXT_SRIOV is not set)
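The bnxt_get_vf_config and bnxt_set_vf_* routines implement the PF-side VF ndo callbacks, while bnxt_sriov_configure is the PCI sriov_configure hook. Below is a minimal sketch of how such callbacks are typically wired into a driver's net_device_ops; the structure name is hypothetical and the authoritative table lives in bnxt.c, so treat this as orientation only, not as the driver's actual code.

/* Sketch only: typical net_device_ops wiring for the VF ndo callbacks
 * defined in this file (the real table is in bnxt.c).
 */
static const struct net_device_ops bnxt_netdev_ops_sketch = {
	.ndo_get_vf_config	= bnxt_get_vf_config,
	.ndo_set_vf_mac		= bnxt_set_vf_mac,
	.ndo_set_vf_vlan	= bnxt_set_vf_vlan,
	.ndo_set_vf_rate	= bnxt_set_vf_bw,
	.ndo_set_vf_link_state	= bnxt_set_vf_link_state,
	.ndo_set_vf_spoofchk	= bnxt_set_vf_spoofchk,
	.ndo_set_vf_trust	= bnxt_set_vf_trust,
};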
1 /* Broadcom NetXtreme-C/E network driver.
2  *
3  * Copyright (c) 2014-2016 Broadcom Corporation
4  * Copyright (c) 2016-2018 Broadcom Limited
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License as published by
8  * the Free Software Foundation.
9  */
10
11 #include <linux/module.h>
12 #include <linux/pci.h>
13 #include <linux/netdevice.h>
14 #include <linux/if_vlan.h>
15 #include <linux/interrupt.h>
16 #include <linux/etherdevice.h>
17 #include "bnxt_hsi.h"
18 #include "bnxt.h"
19 #include "bnxt_ulp.h"
20 #include "bnxt_sriov.h"
21 #include "bnxt_vfr.h"
22 #include "bnxt_ethtool.h"
23
24 #ifdef CONFIG_BNXT_SRIOV
25 static int bnxt_hwrm_fwd_async_event_cmpl(struct bnxt *bp,
26 struct bnxt_vf_info *vf, u16 event_id)
27 {
28 struct hwrm_fwd_async_event_cmpl_input req = {0};
29 struct hwrm_async_event_cmpl *async_cmpl;
30 int rc = 0;
31
32 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FWD_ASYNC_EVENT_CMPL, -1, -1);
33 if (vf)
34 req.encap_async_event_target_id = cpu_to_le16(vf->fw_fid);
35 else
36 /* broadcast this async event to all VFs */
37 req.encap_async_event_target_id = cpu_to_le16(0xffff);
38 async_cmpl = (struct hwrm_async_event_cmpl *)req.encap_async_event_cmpl;
39 async_cmpl->type = cpu_to_le16(ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT);
40 async_cmpl->event_id = cpu_to_le16(event_id);
41
42 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
43 if (rc)
44 netdev_err(bp->dev, "hwrm_fwd_async_event_cmpl failed. rc:%d\n",
45 rc);
46 return rc;
47 }
48
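/* Common sanity checks for the VF ndo callbacks below: the PF must be up,
 * SR-IOV must be enabled, and vf_id must refer to an active VF.
 */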
49 static int bnxt_vf_ndo_prep(struct bnxt *bp, int vf_id)
50 {
51 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
52 netdev_err(bp->dev, "vf ndo called though PF is down\n");
53 return -EINVAL;
54 }
55 if (!bp->pf.active_vfs) {
56 netdev_err(bp->dev, "vf ndo called though sriov is disabled\n");
57 return -EINVAL;
58 }
59 if (vf_id >= bp->pf.active_vfs) {
60 netdev_err(bp->dev, "Invalid VF id %d\n", vf_id);
61 return -EINVAL;
62 }
63 return 0;
64 }
65
66 int bnxt_set_vf_spoofchk(struct net_device *dev, int vf_id, bool setting)
67 {
68 struct hwrm_func_cfg_input req = {0};
69 struct bnxt *bp = netdev_priv(dev);
70 struct bnxt_vf_info *vf;
71 bool old_setting = false;
72 u32 func_flags;
73 int rc;
74
75 if (bp->hwrm_spec_code < 0x10701)
76 return -ENOTSUPP;
77
78 rc = bnxt_vf_ndo_prep(bp, vf_id);
79 if (rc)
80 return rc;
81
82 vf = &bp->pf.vf[vf_id];
83 if (vf->flags & BNXT_VF_SPOOFCHK)
84 old_setting = true;
85 if (old_setting == setting)
86 return 0;
87
88 if (setting)
89 func_flags = FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_ENABLE;
90 else
91 func_flags = FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_DISABLE;
92 /* TODO: if the driver supports VLAN filter on guest VLAN,
93  * the spoof check should also include vlan anti-spoofing
94  */
95 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
96 req.fid = cpu_to_le16(vf->fw_fid);
97 req.flags = cpu_to_le32(func_flags);
98 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
99 if (!rc) {
100 if (setting)
101 vf->flags |= BNXT_VF_SPOOFCHK;
102 else
103 vf->flags &= ~BNXT_VF_SPOOFCHK;
104 }
105 return rc;
106 }
107
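/* Query this VF's function flags from firmware (HWRM_FUNC_QCFG) and cache
 * them in vf->func_qcfg_flags.
 */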
108 static int bnxt_hwrm_func_qcfg_flags(struct bnxt *bp, struct bnxt_vf_info *vf)
109 {
110 struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
111 struct hwrm_func_qcfg_input req = {0};
112 int rc;
113
114 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1);
115 req.fid = cpu_to_le16(vf->fw_fid);
116 mutex_lock(&bp->hwrm_cmd_lock);
117 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
118 if (rc) {
119 mutex_unlock(&bp->hwrm_cmd_lock);
120 return rc;
121 }
122 vf->func_qcfg_flags = le16_to_cpu(resp->flags);
123 mutex_unlock(&bp->hwrm_cmd_lock);
124 return 0;
125 }
126
127 static bool bnxt_is_trusted_vf(struct bnxt *bp, struct bnxt_vf_info *vf)
128 {
129 if (!(bp->fw_cap & BNXT_FW_CAP_TRUSTED_VF))
130 return !!(vf->flags & BNXT_VF_TRUST);
131
132 bnxt_hwrm_func_qcfg_flags(bp, vf);
133 return !!(vf->func_qcfg_flags & FUNC_QCFG_RESP_FLAGS_TRUSTED_VF);
134 }
135
136 static int bnxt_hwrm_set_trusted_vf(struct bnxt *bp, struct bnxt_vf_info *vf)
137 {
138 struct hwrm_func_cfg_input req = {0};
139 int rc;
140
141 if (!(bp->fw_cap & BNXT_FW_CAP_TRUSTED_VF))
142 return 0;
143
144 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
145 req.fid = cpu_to_le16(vf->fw_fid);
146 if (vf->flags & BNXT_VF_TRUST)
147 req.flags = cpu_to_le32(FUNC_CFG_REQ_FLAGS_TRUSTED_VF_ENABLE);
148 else
149 req.flags = cpu_to_le32(FUNC_CFG_REQ_FLAGS_TRUSTED_VF_DISABLE);
150 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
151 return rc;
152 }
153
154 int bnxt_set_vf_trust(struct net_device *dev, int vf_id, bool trusted)
155 {
156 struct bnxt *bp = netdev_priv(dev);
157 struct bnxt_vf_info *vf;
158
159 if (bnxt_vf_ndo_prep(bp, vf_id))
160 return -EINVAL;
161
162 vf = &bp->pf.vf[vf_id];
163 if (trusted)
164 vf->flags |= BNXT_VF_TRUST;
165 else
166 vf->flags &= ~BNXT_VF_TRUST;
167
168 bnxt_hwrm_set_trusted_vf(bp, vf);
169 return 0;
170 }
171
172 int bnxt_get_vf_config(struct net_device *dev, int vf_id,
173 struct ifla_vf_info *ivi)
174 {
175 struct bnxt *bp = netdev_priv(dev);
176 struct bnxt_vf_info *vf;
177 int rc;
178
179 rc = bnxt_vf_ndo_prep(bp, vf_id);
180 if (rc)
181 return rc;
182
183 ivi->vf = vf_id;
184 vf = &bp->pf.vf[vf_id];
185
186 if (is_valid_ether_addr(vf->mac_addr))
187 memcpy(&ivi->mac, vf->mac_addr, ETH_ALEN);
188 else
189 memcpy(&ivi->mac, vf->vf_mac_addr, ETH_ALEN);
190 ivi->max_tx_rate = vf->max_tx_rate;
191 ivi->min_tx_rate = vf->min_tx_rate;
192 ivi->vlan = vf->vlan;
193 if (vf->flags & BNXT_VF_QOS)
194 ivi->qos = vf->vlan >> VLAN_PRIO_SHIFT;
195 else
196 ivi->qos = 0;
197 ivi->spoofchk = !!(vf->flags & BNXT_VF_SPOOFCHK);
198 ivi->trusted = bnxt_is_trusted_vf(bp, vf);
199 if (!(vf->flags & BNXT_VF_LINK_FORCED))
200 ivi->linkstate = IFLA_VF_LINK_STATE_AUTO;
201 else if (vf->flags & BNXT_VF_LINK_UP)
202 ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE;
203 else
204 ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE;
205
206 return 0;
207 }
208
209 int bnxt_set_vf_mac(struct net_device *dev, int vf_id, u8 *mac)
210 {
211 struct hwrm_func_cfg_input req = {0};
212 struct bnxt *bp = netdev_priv(dev);
213 struct bnxt_vf_info *vf;
214 int rc;
215
216 rc = bnxt_vf_ndo_prep(bp, vf_id);
217 if (rc)
218 return rc;
219 /* reject broadcast or multicast MAC addresses; a zero MAC means
220  * the VF may use its own MAC address
221  */
222 if (is_multicast_ether_addr(mac)) {
223 netdev_err(dev, "Invalid VF ethernet address\n");
224 return -EINVAL;
225 }
226 vf = &bp->pf.vf[vf_id];
227
228 memcpy(vf->mac_addr, mac, ETH_ALEN);
229 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
230 req.fid = cpu_to_le16(vf->fw_fid);
231 req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_MAC_ADDR);
232 memcpy(req.dflt_mac_addr, mac, ETH_ALEN);
233 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
234 }
235
236 int bnxt_set_vf_vlan(struct net_device *dev, int vf_id, u16 vlan_id, u8 qos,
237 __be16 vlan_proto)
238 {
239 struct hwrm_func_cfg_input req = {0};
240 struct bnxt *bp = netdev_priv(dev);
241 struct bnxt_vf_info *vf;
242 u16 vlan_tag;
243 int rc;
244
245 if (bp->hwrm_spec_code < 0x10201)
246 return -ENOTSUPP;
247
248 if (vlan_proto != htons(ETH_P_8021Q))
249 return -EPROTONOSUPPORT;
250
251 rc = bnxt_vf_ndo_prep(bp, vf_id);
252 if (rc)
253 return rc;
254
255 /* TODO: proper handling of user priority is still needed;
256  * for now fail the command if a non-zero priority is requested
257  */
258 if (vlan_id > 4095 || qos)
259 return -EINVAL;
260
261 vf = &bp->pf.vf[vf_id];
262 vlan_tag = vlan_id;
263 if (vlan_tag == vf->vlan)
264 return 0;
265
266 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
267 req.fid = cpu_to_le16(vf->fw_fid);
268 req.dflt_vlan = cpu_to_le16(vlan_tag);
269 req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_VLAN);
270 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
271 if (!rc)
272 vf->vlan = vlan_tag;
273 return rc;
274 }
275
276 int bnxt_set_vf_bw(struct net_device *dev, int vf_id, int min_tx_rate,
277 int max_tx_rate)
278 {
279 struct hwrm_func_cfg_input req = {0};
280 struct bnxt *bp = netdev_priv(dev);
281 struct bnxt_vf_info *vf;
282 u32 pf_link_speed;
283 int rc;
284
285 rc = bnxt_vf_ndo_prep(bp, vf_id);
286 if (rc)
287 return rc;
288
289 vf = &bp->pf.vf[vf_id];
290 pf_link_speed = bnxt_fw_to_ethtool_speed(bp->link_info.link_speed);
291 if (max_tx_rate > pf_link_speed) {
292 netdev_info(bp->dev, "max tx rate %d exceed PF link speed for VF %d\n",
293 max_tx_rate, vf_id);
294 return -EINVAL;
295 }
296
297 if (min_tx_rate > pf_link_speed || min_tx_rate > max_tx_rate) {
298 netdev_info(bp->dev, "min tx rate %d is invalid for VF %d\n",
299 min_tx_rate, vf_id);
300 return -EINVAL;
301 }
302 if (min_tx_rate == vf->min_tx_rate && max_tx_rate == vf->max_tx_rate)
303 return 0;
304 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
305 req.fid = cpu_to_le16(vf->fw_fid);
306 req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_MAX_BW);
307 req.max_bw = cpu_to_le32(max_tx_rate);
308 req.enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_MIN_BW);
309 req.min_bw = cpu_to_le32(min_tx_rate);
310 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
311 if (!rc) {
312 vf->min_tx_rate = min_tx_rate;
313 vf->max_tx_rate = max_tx_rate;
314 }
315 return rc;
316 }
317
318 int bnxt_set_vf_link_state(struct net_device *dev, int vf_id, int link)
319 {
320 struct bnxt *bp = netdev_priv(dev);
321 struct bnxt_vf_info *vf;
322 int rc;
323
324 rc = bnxt_vf_ndo_prep(bp, vf_id);
325 if (rc)
326 return rc;
327
328 vf = &bp->pf.vf[vf_id];
329
330 vf->flags &= ~(BNXT_VF_LINK_UP | BNXT_VF_LINK_FORCED);
331 switch (link) {
332 case IFLA_VF_LINK_STATE_AUTO:
333 vf->flags |= BNXT_VF_LINK_UP;
334 break;
335 case IFLA_VF_LINK_STATE_DISABLE:
336 vf->flags |= BNXT_VF_LINK_FORCED;
337 break;
338 case IFLA_VF_LINK_STATE_ENABLE:
339 vf->flags |= BNXT_VF_LINK_UP | BNXT_VF_LINK_FORCED;
340 break;
341 default:
342 netdev_err(bp->dev, "Invalid link option\n");
343 rc = -EINVAL;
344 break;
345 }
346 if (vf->flags & (BNXT_VF_LINK_UP | BNXT_VF_LINK_FORCED))
347 rc = bnxt_hwrm_fwd_async_event_cmpl(bp, vf,
348 ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE);
349 return rc;
350 }
351
352 static int bnxt_set_vf_attr(struct bnxt *bp, int num_vfs)
353 {
354 int i;
355 struct bnxt_vf_info *vf;
356
357 for (i = 0; i < num_vfs; i++) {
358 vf = &bp->pf.vf[i];
359 memset(vf, 0, sizeof(*vf));
360 }
361 return 0;
362 }
363
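/* Ask firmware to free the resources previously reserved for each VF in
 * the range [first_vf_id, first_vf_id + num_vfs).
 */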
364 static int bnxt_hwrm_func_vf_resource_free(struct bnxt *bp, int num_vfs)
365 {
366 int i, rc = 0;
367 struct bnxt_pf_info *pf = &bp->pf;
368 struct hwrm_func_vf_resc_free_input req = {0};
369
370 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_RESC_FREE, -1, -1);
371
372 mutex_lock(&bp->hwrm_cmd_lock);
373 for (i = pf->first_vf_id; i < pf->first_vf_id + num_vfs; i++) {
374 req.vf_id = cpu_to_le16(i);
375 rc = _hwrm_send_message(bp, &req, sizeof(req),
376 HWRM_CMD_TIMEOUT);
377 if (rc)
378 break;
379 }
380 mutex_unlock(&bp->hwrm_cmd_lock);
381 return rc;
382 }
383
384 static void bnxt_free_vf_resources(struct bnxt *bp)
385 {
386 struct pci_dev *pdev = bp->pdev;
387 int i;
388
389 kfree(bp->pf.vf_event_bmap);
390 bp->pf.vf_event_bmap = NULL;
391
392 for (i = 0; i < 4; i++) {
393 if (bp->pf.hwrm_cmd_req_addr[i]) {
394 dma_free_coherent(&pdev->dev, BNXT_PAGE_SIZE,
395 bp->pf.hwrm_cmd_req_addr[i],
396 bp->pf.hwrm_cmd_req_dma_addr[i]);
397 bp->pf.hwrm_cmd_req_addr[i] = NULL;
398 }
399 }
400
401 kfree(bp->pf.vf);
402 bp->pf.vf = NULL;
403 }
404
405 static int bnxt_alloc_vf_resources(struct bnxt *bp, int num_vfs)
406 {
407 struct pci_dev *pdev = bp->pdev;
408 u32 nr_pages, size, i, j, k = 0;
409
410 bp->pf.vf = kcalloc(num_vfs, sizeof(struct bnxt_vf_info), GFP_KERNEL);
411 if (!bp->pf.vf)
412 return -ENOMEM;
413
414 bnxt_set_vf_attr(bp, num_vfs);
415
416 size = num_vfs * BNXT_HWRM_REQ_MAX_SIZE;
417 nr_pages = size / BNXT_PAGE_SIZE;
418 if (size & (BNXT_PAGE_SIZE - 1))
419 nr_pages++;
420
421 for (i = 0; i < nr_pages; i++) {
422 bp->pf.hwrm_cmd_req_addr[i] =
423 dma_alloc_coherent(&pdev->dev, BNXT_PAGE_SIZE,
424 &bp->pf.hwrm_cmd_req_dma_addr[i],
425 GFP_KERNEL);
426
427 if (!bp->pf.hwrm_cmd_req_addr[i])
428 return -ENOMEM;
429
430 for (j = 0; j < BNXT_HWRM_REQS_PER_PAGE && k < num_vfs; j++) {
431 struct bnxt_vf_info *vf = &bp->pf.vf[k];
432
433 vf->hwrm_cmd_req_addr = bp->pf.hwrm_cmd_req_addr[i] +
434 j * BNXT_HWRM_REQ_MAX_SIZE;
435 vf->hwrm_cmd_req_dma_addr =
436 bp->pf.hwrm_cmd_req_dma_addr[i] + j *
437 BNXT_HWRM_REQ_MAX_SIZE;
438 k++;
439 }
440 }
441
442 /* Max 128 VFs (16 bytes = 128 bits in the event bitmap) */
443 bp->pf.vf_event_bmap = kzalloc(16, GFP_KERNEL);
444 if (!bp->pf.vf_event_bmap)
445 return -ENOMEM;
446
447 bp->pf.hwrm_cmd_req_pages = nr_pages;
448 return 0;
449 }
450
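/* Register the PF's DMA request-buffer pages with firmware so that HWRM
 * commands issued by the VFs can be placed there for the PF to process.
 */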
451 static int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp)
452 {
453 struct hwrm_func_buf_rgtr_input req = {0};
454
455 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_BUF_RGTR, -1, -1);
456
457 req.req_buf_num_pages = cpu_to_le16(bp->pf.hwrm_cmd_req_pages);
458 req.req_buf_page_size = cpu_to_le16(BNXT_PAGE_SHIFT);
459 req.req_buf_len = cpu_to_le16(BNXT_HWRM_REQ_MAX_SIZE);
460 req.req_buf_page_addr0 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[0]);
461 req.req_buf_page_addr1 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[1]);
462 req.req_buf_page_addr2 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[2]);
463 req.req_buf_page_addr3 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[3]);
464
465 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
466 }
467
468 /* Caller holds bp->hwrm_cmd_lock mutex lock */
469 static void __bnxt_set_vf_params(struct bnxt *bp, int vf_id)
470 {
471 struct hwrm_func_cfg_input req = {0};
472 struct bnxt_vf_info *vf;
473
474 vf = &bp->pf.vf[vf_id];
475 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
476 req.fid = cpu_to_le16(vf->fw_fid);
477
478 if (is_valid_ether_addr(vf->mac_addr)) {
479 req.enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_MAC_ADDR);
480 memcpy(req.dflt_mac_addr, vf->mac_addr, ETH_ALEN);
481 }
482 if (vf->vlan) {
483 req.enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_VLAN);
484 req.dflt_vlan = cpu_to_le16(vf->vlan);
485 }
486 if (vf->max_tx_rate) {
487 req.enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_MAX_BW);
488 req.max_bw = cpu_to_le32(vf->max_tx_rate);
489 #ifdef HAVE_IFLA_TX_RATE
490 req.enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_MIN_BW);
491 req.min_bw = cpu_to_le32(vf->min_tx_rate);
492 #endif
493 }
494 if (vf->flags & BNXT_VF_TRUST)
495 req.flags |= cpu_to_le32(FUNC_CFG_REQ_FLAGS_TRUSTED_VF_ENABLE);
496
497 _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
498 }
499
500 /* Only called by the PF to reserve resources for VFs; returns the actual
501  * number of VFs configured, or < 0 on error.
502  */
503 static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs, bool reset)
504 {
505 struct hwrm_func_vf_resource_cfg_input req = {0};
506 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
507 u16 vf_tx_rings, vf_rx_rings, vf_cp_rings;
508 u16 vf_stat_ctx, vf_vnics, vf_ring_grps;
509 struct bnxt_pf_info *pf = &bp->pf;
510 int i, rc = 0, min = 1;
511 u16 vf_msix = 0;
512
513 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_RESOURCE_CFG, -1, -1);
514
515 if (bp->flags & BNXT_FLAG_CHIP_P5) {
516 vf_msix = hw_resc->max_nqs - bnxt_nq_rings_in_use(bp);
517 vf_ring_grps = 0;
518 } else {
519 vf_ring_grps = hw_resc->max_hw_ring_grps - bp->rx_nr_rings;
520 }
521 vf_cp_rings = bnxt_get_avail_cp_rings_for_en(bp);
522 vf_stat_ctx = bnxt_get_avail_stat_ctxs_for_en(bp);
523 if (bp->flags & BNXT_FLAG_AGG_RINGS)
524 vf_rx_rings = hw_resc->max_rx_rings - bp->rx_nr_rings * 2;
525 else
526 vf_rx_rings = hw_resc->max_rx_rings - bp->rx_nr_rings;
527 vf_tx_rings = hw_resc->max_tx_rings - bp->tx_nr_rings;
528 vf_vnics = hw_resc->max_vnics - bp->nr_vnics;
529 vf_vnics = min_t(u16, vf_vnics, vf_rx_rings);
530
531 req.min_rsscos_ctx = cpu_to_le16(BNXT_VF_MIN_RSS_CTX);
532 req.max_rsscos_ctx = cpu_to_le16(BNXT_VF_MAX_RSS_CTX);
533 if (pf->vf_resv_strategy == BNXT_VF_RESV_STRATEGY_MINIMAL_STATIC) {
534 min = 0;
535 req.min_rsscos_ctx = cpu_to_le16(min);
536 }
537 if (pf->vf_resv_strategy == BNXT_VF_RESV_STRATEGY_MINIMAL ||
538 pf->vf_resv_strategy == BNXT_VF_RESV_STRATEGY_MINIMAL_STATIC) {
539 req.min_cmpl_rings = cpu_to_le16(min);
540 req.min_tx_rings = cpu_to_le16(min);
541 req.min_rx_rings = cpu_to_le16(min);
542 req.min_l2_ctxs = cpu_to_le16(min);
543 req.min_vnics = cpu_to_le16(min);
544 req.min_stat_ctx = cpu_to_le16(min);
545 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
546 req.min_hw_ring_grps = cpu_to_le16(min);
547 } else {
548 vf_cp_rings /= num_vfs;
549 vf_tx_rings /= num_vfs;
550 vf_rx_rings /= num_vfs;
551 vf_vnics /= num_vfs;
552 vf_stat_ctx /= num_vfs;
553 vf_ring_grps /= num_vfs;
554
555 req.min_cmpl_rings = cpu_to_le16(vf_cp_rings);
556 req.min_tx_rings = cpu_to_le16(vf_tx_rings);
557 req.min_rx_rings = cpu_to_le16(vf_rx_rings);
558 req.min_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX);
559 req.min_vnics = cpu_to_le16(vf_vnics);
560 req.min_stat_ctx = cpu_to_le16(vf_stat_ctx);
561 req.min_hw_ring_grps = cpu_to_le16(vf_ring_grps);
562 }
563 req.max_cmpl_rings = cpu_to_le16(vf_cp_rings);
564 req.max_tx_rings = cpu_to_le16(vf_tx_rings);
565 req.max_rx_rings = cpu_to_le16(vf_rx_rings);
566 req.max_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX);
567 req.max_vnics = cpu_to_le16(vf_vnics);
568 req.max_stat_ctx = cpu_to_le16(vf_stat_ctx);
569 req.max_hw_ring_grps = cpu_to_le16(vf_ring_grps);
570 if (bp->flags & BNXT_FLAG_CHIP_P5)
571 req.max_msix = cpu_to_le16(vf_msix / num_vfs);
572
573 mutex_lock(&bp->hwrm_cmd_lock);
574 for (i = 0; i < num_vfs; i++) {
575 if (reset)
576 __bnxt_set_vf_params(bp, i);
577
578 req.vf_id = cpu_to_le16(pf->first_vf_id + i);
579 rc = _hwrm_send_message(bp, &req, sizeof(req),
580 HWRM_CMD_TIMEOUT);
581 if (rc)
582 break;
583 pf->active_vfs = i + 1;
584 pf->vf[i].fw_fid = pf->first_vf_id + i;
585 }
586 mutex_unlock(&bp->hwrm_cmd_lock);
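/* Deduct the per-VF reservations from the PF's resource maximums. */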
587 if (pf->active_vfs) {
588 u16 n = pf->active_vfs;
589
590 hw_resc->max_tx_rings -= le16_to_cpu(req.min_tx_rings) * n;
591 hw_resc->max_rx_rings -= le16_to_cpu(req.min_rx_rings) * n;
592 hw_resc->max_hw_ring_grps -= le16_to_cpu(req.min_hw_ring_grps) *
593 n;
594 hw_resc->max_cp_rings -= le16_to_cpu(req.min_cmpl_rings) * n;
595 hw_resc->max_rsscos_ctxs -= pf->active_vfs;
596 hw_resc->max_stat_ctxs -= le16_to_cpu(req.min_stat_ctx) * n;
597 hw_resc->max_vnics -= le16_to_cpu(req.min_vnics) * n;
598 if (bp->flags & BNXT_FLAG_CHIP_P5)
599 hw_resc->max_irqs -= vf_msix * n;
600
601 rc = pf->active_vfs;
602 }
603 return rc;
604 }
605
606 /* Only called by the PF to reserve resources for VFs; returns the actual
607  * number of VFs configured, or < 0 on error.
608  */
609 static int bnxt_hwrm_func_cfg(struct bnxt *bp, int num_vfs)
610 {
611 u32 rc = 0, mtu, i;
612 u16 vf_tx_rings, vf_rx_rings, vf_cp_rings, vf_stat_ctx, vf_vnics;
613 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
614 struct hwrm_func_cfg_input req = {0};
615 struct bnxt_pf_info *pf = &bp->pf;
616 int total_vf_tx_rings = 0;
617 u16 vf_ring_grps;
618
619 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
620
621 /* Remaining rings are distributed equally among the VFs for now */
622 vf_cp_rings = bnxt_get_avail_cp_rings_for_en(bp) / num_vfs;
623 vf_stat_ctx = bnxt_get_avail_stat_ctxs_for_en(bp) / num_vfs;
624 if (bp->flags & BNXT_FLAG_AGG_RINGS)
625 vf_rx_rings = (hw_resc->max_rx_rings - bp->rx_nr_rings * 2) /
626 num_vfs;
627 else
628 vf_rx_rings = (hw_resc->max_rx_rings - bp->rx_nr_rings) /
629 num_vfs;
630 vf_ring_grps = (hw_resc->max_hw_ring_grps - bp->rx_nr_rings) / num_vfs;
631 vf_tx_rings = (hw_resc->max_tx_rings - bp->tx_nr_rings) / num_vfs;
632 vf_vnics = (hw_resc->max_vnics - bp->nr_vnics) / num_vfs;
633 vf_vnics = min_t(u16, vf_vnics, vf_rx_rings);
634
635 req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_MTU |
636 FUNC_CFG_REQ_ENABLES_MRU |
637 FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS |
638 FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS |
639 FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
640 FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS |
641 FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS |
642 FUNC_CFG_REQ_ENABLES_NUM_L2_CTXS |
643 FUNC_CFG_REQ_ENABLES_NUM_VNICS |
644 FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS);
645
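/* Size the VF MTU/MRU as the PF netdev MTU plus Ethernet header, FCS and
 * one VLAN tag of headroom.
 */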
646 mtu = bp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
647 req.mru = cpu_to_le16(mtu);
648 req.mtu = cpu_to_le16(mtu);
649
650 req.num_rsscos_ctxs = cpu_to_le16(1);
651 req.num_cmpl_rings = cpu_to_le16(vf_cp_rings);
652 req.num_tx_rings = cpu_to_le16(vf_tx_rings);
653 req.num_rx_rings = cpu_to_le16(vf_rx_rings);
654 req.num_hw_ring_grps = cpu_to_le16(vf_ring_grps);
655 req.num_l2_ctxs = cpu_to_le16(4);
656
657 req.num_vnics = cpu_to_le16(vf_vnics);
658 /* FIXME: get the min rx_rings */
659 req.num_stat_ctxs = cpu_to_le16(vf_stat_ctx);
660
661 mutex_lock(&bp->hwrm_cmd_lock);
662 for (i = 0; i < num_vfs; i++) {
663 int vf_tx_rsvd = vf_tx_rings;
664
665 req.fid = cpu_to_le16(pf->first_vf_id + i);
666 rc = _hwrm_send_message(bp, &req, sizeof(req),
667 HWRM_CMD_TIMEOUT);
668 if (rc)
669 break;
670 pf->active_vfs = i + 1;
671 pf->vf[i].fw_fid = le16_to_cpu(req.fid);
672 rc = __bnxt_hwrm_get_tx_rings(bp, pf->vf[i].fw_fid,
673 &vf_tx_rsvd);
674 if (rc)
675 break;
676 total_vf_tx_rings += vf_tx_rsvd;
677 }
678 mutex_unlock(&bp->hwrm_cmd_lock);
679 if (pf->active_vfs) {
680 hw_resc->max_tx_rings -= total_vf_tx_rings;
681 hw_resc->max_rx_rings -= vf_rx_rings * num_vfs;
682 hw_resc->max_hw_ring_grps -= vf_ring_grps * num_vfs;
683 hw_resc->max_cp_rings -= vf_cp_rings * num_vfs;
684 hw_resc->max_rsscos_ctxs -= num_vfs;
685 hw_resc->max_stat_ctxs -= vf_stat_ctx * num_vfs;
686 hw_resc->max_vnics -= vf_vnics * num_vfs;
687 rc = pf->active_vfs;
688 }
689 return rc;
690 }
691
692 static int bnxt_func_cfg(struct bnxt *bp, int num_vfs, bool reset)
693 {
694 if (BNXT_NEW_RM(bp))
695 return bnxt_hwrm_func_vf_resc_cfg(bp, num_vfs, reset);
696 else
697 return bnxt_hwrm_func_cfg(bp, num_vfs);
698 }
699
700 int bnxt_cfg_hw_sriov(struct bnxt *bp, int *num_vfs, bool reset)
701 {
702 int rc;
703
704 /* Register buffers for VFs */
705 rc = bnxt_hwrm_func_buf_rgtr(bp);
706 if (rc)
707 return rc;
708
709 /* Reserve resources for VFs */
710 rc = bnxt_func_cfg(bp, *num_vfs, reset);
711 if (rc != *num_vfs) {
712 if (rc <= 0) {
713 netdev_warn(bp->dev, "Unable to reserve resources for SRIOV.\n");
714 *num_vfs = 0;
715 return rc;
716 }
717 netdev_warn(bp->dev, "Only able to reserve resources for %d VFs.\n",
718 rc);
719 *num_vfs = rc;
720 }
721
722 bnxt_ulp_sriov_cfg(bp, *num_vfs);
723 return 0;
724 }
725
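/* Enable SR-IOV: verify that enough rings and contexts remain for the
 * requested VF count, allocate per-VF state and request buffers, reserve
 * firmware resources, then enable the VFs in the PCI core.
 */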
726 static int bnxt_sriov_enable(struct bnxt *bp, int *num_vfs)
727 {
728 int rc = 0, vfs_supported;
729 int min_rx_rings, min_tx_rings, min_rss_ctxs;
730 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
731 int tx_ok = 0, rx_ok = 0, rss_ok = 0;
732 int avail_cp, avail_stat;
733
734 /* Check whether the requested number of VFs can be supported: each VF
735  * needs at least one RX ring, one TX ring and one RSS context, plus
736  * completion rings and stat contexts to drive them; scale the count
737  * down until the remaining PF resources fit. */
738 vfs_supported = *num_vfs;
739
740 avail_cp = bnxt_get_avail_cp_rings_for_en(bp);
741 avail_stat = bnxt_get_avail_stat_ctxs_for_en(bp);
742 avail_cp = min_t(int, avail_cp, avail_stat);
743
744 while (vfs_supported) {
745 min_rx_rings = vfs_supported;
746 min_tx_rings = vfs_supported;
747 min_rss_ctxs = vfs_supported;
748
749 if (bp->flags & BNXT_FLAG_AGG_RINGS) {
750 if (hw_resc->max_rx_rings - bp->rx_nr_rings * 2 >=
751 min_rx_rings)
752 rx_ok = 1;
753 } else {
754 if (hw_resc->max_rx_rings - bp->rx_nr_rings >=
755 min_rx_rings)
756 rx_ok = 1;
757 }
758 if (hw_resc->max_vnics - bp->nr_vnics < min_rx_rings ||
759 avail_cp < min_rx_rings)
760 rx_ok = 0;
761
762 if (hw_resc->max_tx_rings - bp->tx_nr_rings >= min_tx_rings &&
763 avail_cp >= min_tx_rings)
764 tx_ok = 1;
765
766 if (hw_resc->max_rsscos_ctxs - bp->rsscos_nr_ctxs >=
767 min_rss_ctxs)
768 rss_ok = 1;
769
770 if (tx_ok && rx_ok && rss_ok)
771 break;
772
773 vfs_supported--;
774 }
775
776 if (!vfs_supported) {
777 netdev_err(bp->dev, "Cannot enable VF's as all resources are used by PF\n");
778 return -EINVAL;
779 }
780
781 if (vfs_supported != *num_vfs) {
782 netdev_info(bp->dev, "Requested VFs %d, can enable %d\n",
783 *num_vfs, vfs_supported);
784 *num_vfs = vfs_supported;
785 }
786
787 rc = bnxt_alloc_vf_resources(bp, *num_vfs);
788 if (rc)
789 goto err_out1;
790
791 rc = bnxt_cfg_hw_sriov(bp, num_vfs, false);
792 if (rc)
793 goto err_out2;
794
795 rc = pci_enable_sriov(bp->pdev, *num_vfs);
796 if (rc)
797 goto err_out2;
798
799 return 0;
800
801 err_out2:
802 /* Free the resources reserved for the VFs */
803 bnxt_hwrm_func_vf_resource_free(bp, *num_vfs);
804
805 err_out1:
806 bnxt_free_vf_resources(bp);
807
808 return rc;
809 }
810
811 void bnxt_sriov_disable(struct bnxt *bp)
812 {
813 u16 num_vfs = pci_num_vf(bp->pdev);
814
815 if (!num_vfs)
816 return;
817
818 /* synchronize VF and VF-rep create and destroy */
819 mutex_lock(&bp->sriov_lock);
820 bnxt_vf_reps_destroy(bp);
821
822 if (pci_vfs_assigned(bp->pdev)) {
823 bnxt_hwrm_fwd_async_event_cmpl(
824 bp, NULL, ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD);
825 netdev_warn(bp->dev, "Unable to free %d VFs because some are assigned to VMs.\n",
826 num_vfs);
827 } else {
828 pci_disable_sriov(bp->pdev);
829 /* Free the HW resources reserved for the VFs */
830 bnxt_hwrm_func_vf_resource_free(bp, num_vfs);
831 }
832 mutex_unlock(&bp->sriov_lock);
833
834 bnxt_free_vf_resources(bp);
835
836 bp->pf.active_vfs = 0;
837 /* Reclaim all resources for the PF. */
838 rtnl_lock();
839 bnxt_restore_pf_fw_resources(bp);
840 rtnl_unlock();
841
842 bnxt_ulp_sriov_cfg(bp, 0);
843 }
844
845 int bnxt_sriov_configure(struct pci_dev *pdev, int num_vfs)
846 {
847 struct net_device *dev = pci_get_drvdata(pdev);
848 struct bnxt *bp = netdev_priv(dev);
849
850 if (!(bp->flags & BNXT_FLAG_USING_MSIX)) {
851 netdev_warn(dev, "Not allow SRIOV if the irq mode is not MSIX\n");
852 return 0;
853 }
854
855 rtnl_lock();
856 if (!netif_running(dev)) {
857 netdev_warn(dev, "Reject SRIOV config request since if is down!\n");
858 rtnl_unlock();
859 return 0;
860 }
861 if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
862 netdev_warn(dev, "Reject SRIOV config request when FW reset is in progress\n");
863 rtnl_unlock();
864 return 0;
865 }
866 bp->sriov_cfg = true;
867 rtnl_unlock();
868
869 if (pci_vfs_assigned(bp->pdev)) {
870 netdev_warn(dev, "Unable to configure SRIOV since some VFs are assigned to VMs.\n");
871 num_vfs = 0;
872 goto sriov_cfg_exit;
873 }
874
875 /* Check if the requested number of VFs is already enabled */
876 if (num_vfs && num_vfs == bp->pf.active_vfs)
877 goto sriov_cfg_exit;
878
879 /* If there are pre-existing VFs, clean them up first */
880 bnxt_sriov_disable(bp);
881 if (!num_vfs)
882 goto sriov_cfg_exit;
883
884 bnxt_sriov_enable(bp, &num_vfs);
885
886 sriov_cfg_exit:
887 bp->sriov_cfg = false;
888 wake_up(&bp->sriov_cfg_wait);
889
890 return num_vfs;
891 }
892
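/* Forward an encapsulated HWRM response from the PF back to the VF that
 * issued the original request.
 */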
893 static int bnxt_hwrm_fwd_resp(struct bnxt *bp, struct bnxt_vf_info *vf,
894 void *encap_resp, __le64 encap_resp_addr,
895 __le16 encap_resp_cpr, u32 msg_size)
896 {
897 int rc = 0;
898 struct hwrm_fwd_resp_input req = {0};
899
900 if (BNXT_FWD_RESP_SIZE_ERR(msg_size))
901 return -EINVAL;
902
903 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FWD_RESP, -1, -1);
904
905 /* Set the new target id */
906 req.target_id = cpu_to_le16(vf->fw_fid);
907 req.encap_resp_target_id = cpu_to_le16(vf->fw_fid);
908 req.encap_resp_len = cpu_to_le16(msg_size);
909 req.encap_resp_addr = encap_resp_addr;
910 req.encap_resp_cmpl_ring = encap_resp_cpr;
911 memcpy(req.encap_resp, encap_resp, msg_size);
912
913 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
914 if (rc)
915 netdev_err(bp->dev, "hwrm_fwd_resp failed. rc:%d\n", rc);
916 return rc;
917 }
918
919 static int bnxt_hwrm_fwd_err_resp(struct bnxt *bp, struct bnxt_vf_info *vf,
920 u32 msg_size)
921 {
922 int rc = 0;
923 struct hwrm_reject_fwd_resp_input req = {0};
924
925 if (BNXT_REJ_FWD_RESP_SIZE_ERR(msg_size))
926 return -EINVAL;
927
928 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_REJECT_FWD_RESP, -1, -1);
929 /* Set the new target id */
930 req.target_id = cpu_to_le16(vf->fw_fid);
931 req.encap_resp_target_id = cpu_to_le16(vf->fw_fid);
932 memcpy(req.encap_request, vf->hwrm_cmd_req_addr, msg_size);
933
934 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
935 if (rc)
936 netdev_err(bp->dev, "hwrm_fwd_err_resp failed. rc:%d\n", rc);
937 return rc;
938 }
939
940 static int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, struct bnxt_vf_info *vf,
941 u32 msg_size)
942 {
943 int rc = 0;
944 struct hwrm_exec_fwd_resp_input req = {0};
945
946 if (BNXT_EXEC_FWD_RESP_SIZE_ERR(msg_size))
947 return -EINVAL;
948
949 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_EXEC_FWD_RESP, -1, -1);
950 /* Set the new target id */
951 req.target_id = cpu_to_le16(vf->fw_fid);
952 req.encap_resp_target_id = cpu_to_le16(vf->fw_fid);
953 memcpy(req.encap_request, vf->hwrm_cmd_req_addr, msg_size);
954
955 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
956 if (rc)
957 netdev_err(bp->dev, "hwrm_exec_fw_resp failed. rc:%d\n", rc);
958 return rc;
959 }
960
961 static int bnxt_vf_configure_mac(struct bnxt *bp, struct bnxt_vf_info *vf)
962 {
963 u32 msg_size = sizeof(struct hwrm_func_vf_cfg_input);
964 struct hwrm_func_vf_cfg_input *req =
965 (struct hwrm_func_vf_cfg_input *)vf->hwrm_cmd_req_addr;
966
967 /* Allow the VF to set a valid MAC address if the VF is trusted, or if
968  * the PF has not assigned a MAC address (or assigned this same one)
969  */
970 if (req->enables & cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_DFLT_MAC_ADDR)) {
971 bool trust = bnxt_is_trusted_vf(bp, vf);
972
973 if (is_valid_ether_addr(req->dflt_mac_addr) &&
974 (trust || !is_valid_ether_addr(vf->mac_addr) ||
975 ether_addr_equal(req->dflt_mac_addr, vf->mac_addr))) {
976 ether_addr_copy(vf->vf_mac_addr, req->dflt_mac_addr);
977 return bnxt_hwrm_exec_fwd_resp(bp, vf, msg_size);
978 }
979 return bnxt_hwrm_fwd_err_resp(bp, vf, msg_size);
980 }
981 return bnxt_hwrm_exec_fwd_resp(bp, vf, msg_size);
982 }
983
984 static int bnxt_vf_validate_set_mac(struct bnxt *bp, struct bnxt_vf_info *vf)
985 {
986 u32 msg_size = sizeof(struct hwrm_cfa_l2_filter_alloc_input);
987 struct hwrm_cfa_l2_filter_alloc_input *req =
988 (struct hwrm_cfa_l2_filter_alloc_input *)vf->hwrm_cmd_req_addr;
989 bool mac_ok = false;
990
991 if (!is_valid_ether_addr((const u8 *)req->l2_addr))
992 return bnxt_hwrm_fwd_err_resp(bp, vf, msg_size);
993
994 /* Allow the VF to use a valid MAC address if the VF is trusted.
995  * Otherwise the address must match the MAC assigned by the PF, or,
996  * when no PF-assigned address exists, the VF's own MAC address as
997  * reported by firmware (spec >= 1.2.2).
998  */
999 if (bnxt_is_trusted_vf(bp, vf)) {
1000 mac_ok = true;
1001 } else if (is_valid_ether_addr(vf->mac_addr)) {
1002 if (ether_addr_equal((const u8 *)req->l2_addr, vf->mac_addr))
1003 mac_ok = true;
1004 } else if (is_valid_ether_addr(vf->vf_mac_addr)) {
1005 if (ether_addr_equal((const u8 *)req->l2_addr, vf->vf_mac_addr))
1006 mac_ok = true;
1007 } else {
1008 /* There are two cases:
1009  * 1. If the firmware spec is < 0x10202, the VF MAC address is not
1010  *    forwarded to the PF and so it cannot be validated here.
1011  * 2. The VF is allowed to modify its own MAC when the PF has not
1012  *    assigned a valid MAC address and the firmware spec is >= 0x10202.
1013  */
1014 mac_ok = true;
1015 }
1016 if (mac_ok)
1017 return bnxt_hwrm_exec_fwd_resp(bp, vf, msg_size);
1018 return bnxt_hwrm_fwd_err_resp(bp, vf, msg_size);
1019 }
1020
1021 static int bnxt_vf_set_link(struct bnxt *bp, struct bnxt_vf_info *vf)
1022 {
1023 int rc = 0;
1024
1025 if (!(vf->flags & BNXT_VF_LINK_FORCED)) {
1026 /* link state not forced: forward the real PHY query to firmware */
1027 rc = bnxt_hwrm_exec_fwd_resp(
1028 bp, vf, sizeof(struct hwrm_port_phy_qcfg_input));
1029 } else {
1030 struct hwrm_port_phy_qcfg_output phy_qcfg_resp;
1031 struct hwrm_port_phy_qcfg_input *phy_qcfg_req;
1032
1033 phy_qcfg_req =
1034 (struct hwrm_port_phy_qcfg_input *)vf->hwrm_cmd_req_addr;
1035 mutex_lock(&bp->hwrm_cmd_lock);
1036 memcpy(&phy_qcfg_resp, &bp->link_info.phy_qcfg_resp,
1037 sizeof(phy_qcfg_resp));
1038 mutex_unlock(&bp->hwrm_cmd_lock);
1039 phy_qcfg_resp.resp_len = cpu_to_le16(sizeof(phy_qcfg_resp));
1040 phy_qcfg_resp.seq_id = phy_qcfg_req->seq_id;
1041 phy_qcfg_resp.valid = 1;
1042
1043 if (vf->flags & BNXT_VF_LINK_UP) {
1044 /* if the physical link is down, force link up on the VF */
1045 if (phy_qcfg_resp.link !=
1046 PORT_PHY_QCFG_RESP_LINK_LINK) {
1047 phy_qcfg_resp.link =
1048 PORT_PHY_QCFG_RESP_LINK_LINK;
1049 phy_qcfg_resp.link_speed = cpu_to_le16(
1050 PORT_PHY_QCFG_RESP_LINK_SPEED_10GB);
1051 phy_qcfg_resp.duplex_cfg =
1052 PORT_PHY_QCFG_RESP_DUPLEX_CFG_FULL;
1053 phy_qcfg_resp.duplex_state =
1054 PORT_PHY_QCFG_RESP_DUPLEX_STATE_FULL;
1055 phy_qcfg_resp.pause =
1056 (PORT_PHY_QCFG_RESP_PAUSE_TX |
1057 PORT_PHY_QCFG_RESP_PAUSE_RX);
1058 }
1059 } else {
1060 /* force link down */
1061 phy_qcfg_resp.link = PORT_PHY_QCFG_RESP_LINK_NO_LINK;
1062 phy_qcfg_resp.link_speed = 0;
1063 phy_qcfg_resp.duplex_state =
1064 PORT_PHY_QCFG_RESP_DUPLEX_STATE_HALF;
1065 phy_qcfg_resp.pause = 0;
1066 }
1067 rc = bnxt_hwrm_fwd_resp(bp, vf, &phy_qcfg_resp,
1068 phy_qcfg_req->resp_addr,
1069 phy_qcfg_req->cmpl_ring,
1070 sizeof(phy_qcfg_resp));
1071 }
1072 return rc;
1073 }
1074
1075 static int bnxt_vf_req_validate_snd(struct bnxt *bp, struct bnxt_vf_info *vf)
1076 {
1077 int rc = 0;
1078 struct input *encap_req = vf->hwrm_cmd_req_addr;
1079 u32 req_type = le16_to_cpu(encap_req->req_type);
1080
1081 switch (req_type) {
1082 case HWRM_FUNC_VF_CFG:
1083 rc = bnxt_vf_configure_mac(bp, vf);
1084 break;
1085 case HWRM_CFA_L2_FILTER_ALLOC:
1086 rc = bnxt_vf_validate_set_mac(bp, vf);
1087 break;
1088 case HWRM_FUNC_CFG:
1089 /* TODO: validate whether the VF is allowed to change its MAC address,
1090  * MTU, number of rings, etc.
1091  */
1092 rc = bnxt_hwrm_exec_fwd_resp(
1093 bp, vf, sizeof(struct hwrm_func_cfg_input));
1094 break;
1095 case HWRM_PORT_PHY_QCFG:
1096 rc = bnxt_vf_set_link(bp, vf);
1097 break;
1098 default:
1099 break;
1100 }
1101 return rc;
1102 }
1103
1104 void bnxt_hwrm_exec_fwd_req(struct bnxt *bp)
1105 {
1106 u32 i = 0, active_vfs = bp->pf.active_vfs, vf_id;
1107
1108 /* Scan through the VFs and process the forwarded commands */
1109 while (1) {
1110 vf_id = find_next_bit(bp->pf.vf_event_bmap, active_vfs, i);
1111 if (vf_id >= active_vfs)
1112 break;
1113
1114 clear_bit(vf_id, bp->pf.vf_event_bmap);
1115 bnxt_vf_req_validate_snd(bp, &bp->pf.vf[vf_id]);
1116 i = vf_id + 1;
1117 }
1118 }
1119
1120 void bnxt_update_vf_mac(struct bnxt *bp)
1121 {
1122 struct hwrm_func_qcaps_input req = {0};
1123 struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
1124
1125 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCAPS, -1, -1);
1126 req.fid = cpu_to_le16(0xffff);
1127
1128 mutex_lock(&bp->hwrm_cmd_lock);
1129 if (_hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT))
1130 goto update_vf_mac_exit;
1131
1132 /* Store the MAC address from firmware. There are two cases:
1133  * 1. The MAC address is valid. It is assigned by the PF and must
1134  *    override the current VF MAC address.
1135  * 2. The MAC address is zero. The VF will use a random MAC address by
1136  *    default, but the stored zero MAC still lets the VF user change it
1137  *    later via ndo_set_mac_address() if desired.
1138  */
1139 if (!ether_addr_equal(resp->mac_address, bp->vf.mac_addr))
1140 memcpy(bp->vf.mac_addr, resp->mac_address, ETH_ALEN);
1141
1142 /* overwrite the netdev dev_addr with the admin-assigned VF MAC */
1143 if (is_valid_ether_addr(bp->vf.mac_addr))
1144 memcpy(bp->dev->dev_addr, bp->vf.mac_addr, ETH_ALEN);
1145 update_vf_mac_exit:
1146 mutex_unlock(&bp->hwrm_cmd_lock);
1147 }
1148
1149 int bnxt_approve_mac(struct bnxt *bp, u8 *mac, bool strict)
1150 {
1151 struct hwrm_func_vf_cfg_input req = {0};
1152 int rc = 0;
1153
1154 if (!BNXT_VF(bp))
1155 return 0;
1156
1157 if (bp->hwrm_spec_code < 0x10202) {
1158 if (is_valid_ether_addr(bp->vf.mac_addr))
1159 rc = -EADDRNOTAVAIL;
1160 goto mac_done;
1161 }
1162 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_CFG, -1, -1);
1163 req.enables = cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_DFLT_MAC_ADDR);
1164 memcpy(req.dflt_mac_addr, mac, ETH_ALEN);
1165 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
1166 mac_done:
1167 if (rc && strict) {
1168 rc = -EADDRNOTAVAIL;
1169 netdev_warn(bp->dev, "VF MAC address %pM not approved by the PF\n",
1170 mac);
1171 return rc;
1172 }
1173 return 0;
1174 }
1175 #else
1176
1177 int bnxt_cfg_hw_sriov(struct bnxt *bp, int *num_vfs, bool reset)
1178 {
1179 if (*num_vfs)
1180 return -EOPNOTSUPP;
1181 return 0;
1182 }
1183
1184 void bnxt_sriov_disable(struct bnxt *bp)
1185 {
1186 }
1187
1188 void bnxt_hwrm_exec_fwd_req(struct bnxt *bp)
1189 {
1190 netdev_err(bp->dev, "Invalid VF message received when SRIOV is not enable\n");
1191 }
1192
1193 void bnxt_update_vf_mac(struct bnxt *bp)
1194 {
1195 }
1196
1197 int bnxt_approve_mac(struct bnxt *bp, u8 *mac, bool strict)
1198 {
1199 return 0;
1200 }
1201 #endif