1/******************************************************************************* 2 * 3 * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver 4 * Copyright(c) 2013 - 2014 Intel Corporation. 5 * 6 * This program is free software; you can redistribute it and/or modify it 7 * under the terms and conditions of the GNU General Public License, 8 * version 2, as published by the Free Software Foundation. 9 * 10 * This program is distributed in the hope it will be useful, but WITHOUT 11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 13 * more details. 14 * 15 * You should have received a copy of the GNU General Public License along 16 * with this program. If not, see <http://www.gnu.org/licenses/>. 17 * 18 * The full GNU General Public License is included in this distribution in 19 * the file called "COPYING". 20 * 21 * Contact Information: 22 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> 23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 24 * 25 ******************************************************************************/ 26 27#include "i40evf.h" 28#include "i40e_prototype.h" 29 30/* busy wait delay in msec */ 31#define I40EVF_BUSY_WAIT_DELAY 10 32#define I40EVF_BUSY_WAIT_COUNT 50 33 34/** 35 * i40evf_send_pf_msg 36 * @adapter: adapter structure 37 * @op: virtual channel opcode 38 * @msg: pointer to message buffer 39 * @len: message length 40 * 41 * Send message to PF and print status if failure. 
42 **/ 43static int i40evf_send_pf_msg(struct i40evf_adapter *adapter, 44 enum i40e_virtchnl_ops op, u8 *msg, u16 len) 45{ 46 struct i40e_hw *hw = &adapter->hw; 47 i40e_status err; 48 49 if (adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED) 50 return 0; /* nothing to see here, move along */ 51 52 err = i40e_aq_send_msg_to_pf(hw, op, 0, msg, len, NULL); 53 if (err) 54 dev_err(&adapter->pdev->dev, "Unable to send opcode %d to PF, err %s, aq_err %s\n", 55 op, i40evf_stat_str(hw, err), 56 i40evf_aq_str(hw, hw->aq.asq_last_status)); 57 return err; 58} 59 60/** 61 * i40evf_send_api_ver 62 * @adapter: adapter structure 63 * 64 * Send API version admin queue message to the PF. The reply is not checked 65 * in this function. Returns 0 if the message was successfully 66 * sent, or one of the I40E_ADMIN_QUEUE_ERROR_ statuses if not. 67 **/ 68int i40evf_send_api_ver(struct i40evf_adapter *adapter) 69{ 70 struct i40e_virtchnl_version_info vvi; 71 72 vvi.major = I40E_VIRTCHNL_VERSION_MAJOR; 73 vvi.minor = I40E_VIRTCHNL_VERSION_MINOR; 74 75 return i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_VERSION, (u8 *)&vvi, 76 sizeof(vvi)); 77} 78 79/** 80 * i40evf_verify_api_ver 81 * @adapter: adapter structure 82 * 83 * Compare API versions with the PF. Must be called after admin queue is 84 * initialized. Returns 0 if API versions match, -EIO if they do not, 85 * I40E_ERR_ADMIN_QUEUE_NO_WORK if the admin queue is empty, and any errors 86 * from the firmware are propagated. 
87 **/ 88int i40evf_verify_api_ver(struct i40evf_adapter *adapter) 89{ 90 struct i40e_virtchnl_version_info *pf_vvi; 91 struct i40e_hw *hw = &adapter->hw; 92 struct i40e_arq_event_info event; 93 enum i40e_virtchnl_ops op; 94 i40e_status err; 95 96 event.buf_len = I40EVF_MAX_AQ_BUF_SIZE; 97 event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL); 98 if (!event.msg_buf) { 99 err = -ENOMEM; 100 goto out; 101 } 102 103 while (1) { 104 err = i40evf_clean_arq_element(hw, &event, NULL); 105 /* When the AQ is empty, i40evf_clean_arq_element will return 106 * nonzero and this loop will terminate. 107 */ 108 if (err) 109 goto out_alloc; 110 op = 111 (enum i40e_virtchnl_ops)le32_to_cpu(event.desc.cookie_high); 112 if (op == I40E_VIRTCHNL_OP_VERSION) 113 break; 114 } 115 116 117 err = (i40e_status)le32_to_cpu(event.desc.cookie_low); 118 if (err) 119 goto out_alloc; 120 121 if (op != I40E_VIRTCHNL_OP_VERSION) { 122 dev_info(&adapter->pdev->dev, "Invalid reply type %d from PF\n", 123 op); 124 err = -EIO; 125 goto out_alloc; 126 } 127 128 pf_vvi = (struct i40e_virtchnl_version_info *)event.msg_buf; 129 adapter->pf_version = *pf_vvi; 130 131 if ((pf_vvi->major > I40E_VIRTCHNL_VERSION_MAJOR) || 132 ((pf_vvi->major == I40E_VIRTCHNL_VERSION_MAJOR) && 133 (pf_vvi->minor > I40E_VIRTCHNL_VERSION_MINOR))) 134 err = -EIO; 135 136out_alloc: 137 kfree(event.msg_buf); 138out: 139 return err; 140} 141 142/** 143 * i40evf_send_vf_config_msg 144 * @adapter: adapter structure 145 * 146 * Send VF configuration request admin queue message to the PF. The reply 147 * is not checked in this function. Returns 0 if the message was 148 * successfully sent, or one of the I40E_ADMIN_QUEUE_ERROR_ statuses if not. 
149 **/ 150int i40evf_send_vf_config_msg(struct i40evf_adapter *adapter) 151{ 152 u32 caps; 153 154 adapter->current_op = I40E_VIRTCHNL_OP_GET_VF_RESOURCES; 155 adapter->aq_required &= ~I40EVF_FLAG_AQ_GET_CONFIG; 156 caps = I40E_VIRTCHNL_VF_OFFLOAD_L2 | 157 I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ | 158 I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG | 159 I40E_VIRTCHNL_VF_OFFLOAD_VLAN | 160 I40E_VIRTCHNL_VF_OFFLOAD_WB_ON_ITR; 161 adapter->current_op = I40E_VIRTCHNL_OP_GET_VF_RESOURCES; 162 adapter->aq_required &= ~I40EVF_FLAG_AQ_GET_CONFIG; 163 if (PF_IS_V11(adapter)) 164 return i40evf_send_pf_msg(adapter, 165 I40E_VIRTCHNL_OP_GET_VF_RESOURCES, 166 (u8 *)&caps, sizeof(caps)); 167 else 168 return i40evf_send_pf_msg(adapter, 169 I40E_VIRTCHNL_OP_GET_VF_RESOURCES, 170 NULL, 0); 171} 172 173/** 174 * i40evf_get_vf_config 175 * @hw: pointer to the hardware structure 176 * @len: length of buffer 177 * 178 * Get VF configuration from PF and populate hw structure. Must be called after 179 * admin queue is initialized. Busy waits until response is received from PF, 180 * with maximum timeout. Response from PF is returned in the buffer for further 181 * processing by the caller. 182 **/ 183int i40evf_get_vf_config(struct i40evf_adapter *adapter) 184{ 185 struct i40e_hw *hw = &adapter->hw; 186 struct i40e_arq_event_info event; 187 enum i40e_virtchnl_ops op; 188 i40e_status err; 189 u16 len; 190 191 len = sizeof(struct i40e_virtchnl_vf_resource) + 192 I40E_MAX_VF_VSI * sizeof(struct i40e_virtchnl_vsi_resource); 193 event.buf_len = len; 194 event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL); 195 if (!event.msg_buf) { 196 err = -ENOMEM; 197 goto out; 198 } 199 200 while (1) { 201 /* When the AQ is empty, i40evf_clean_arq_element will return 202 * nonzero and this loop will terminate. 
203 */ 204 err = i40evf_clean_arq_element(hw, &event, NULL); 205 if (err) 206 goto out_alloc; 207 op = 208 (enum i40e_virtchnl_ops)le32_to_cpu(event.desc.cookie_high); 209 if (op == I40E_VIRTCHNL_OP_GET_VF_RESOURCES) 210 break; 211 } 212 213 err = (i40e_status)le32_to_cpu(event.desc.cookie_low); 214 memcpy(adapter->vf_res, event.msg_buf, min(event.msg_len, len)); 215 216 i40e_vf_parse_hw_config(hw, adapter->vf_res); 217out_alloc: 218 kfree(event.msg_buf); 219out: 220 return err; 221} 222 223/** 224 * i40evf_configure_queues 225 * @adapter: adapter structure 226 * 227 * Request that the PF set up our (previously allocated) queues. 228 **/ 229void i40evf_configure_queues(struct i40evf_adapter *adapter) 230{ 231 struct i40e_virtchnl_vsi_queue_config_info *vqci; 232 struct i40e_virtchnl_queue_pair_info *vqpi; 233 int pairs = adapter->num_active_queues; 234 int i, len; 235 236 if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) { 237 /* bail because we already have a command pending */ 238 dev_err(&adapter->pdev->dev, "Cannot configure queues, command %d pending\n", 239 adapter->current_op); 240 return; 241 } 242 adapter->current_op = I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES; 243 len = sizeof(struct i40e_virtchnl_vsi_queue_config_info) + 244 (sizeof(struct i40e_virtchnl_queue_pair_info) * pairs); 245 vqci = kzalloc(len, GFP_ATOMIC); 246 if (!vqci) 247 return; 248 249 vqci->vsi_id = adapter->vsi_res->vsi_id; 250 vqci->num_queue_pairs = pairs; 251 vqpi = vqci->qpair; 252 /* Size check is not needed here - HW max is 16 queue pairs, and we 253 * can fit info for 31 of them into the AQ buffer before it overflows. 
254 */ 255 for (i = 0; i < pairs; i++) { 256 vqpi->txq.vsi_id = vqci->vsi_id; 257 vqpi->txq.queue_id = i; 258 vqpi->txq.ring_len = adapter->tx_rings[i]->count; 259 vqpi->txq.dma_ring_addr = adapter->tx_rings[i]->dma; 260 vqpi->txq.headwb_enabled = 1; 261 vqpi->txq.dma_headwb_addr = vqpi->txq.dma_ring_addr + 262 (vqpi->txq.ring_len * sizeof(struct i40e_tx_desc)); 263 264 vqpi->rxq.vsi_id = vqci->vsi_id; 265 vqpi->rxq.queue_id = i; 266 vqpi->rxq.ring_len = adapter->rx_rings[i]->count; 267 vqpi->rxq.dma_ring_addr = adapter->rx_rings[i]->dma; 268 vqpi->rxq.max_pkt_size = adapter->netdev->mtu 269 + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN; 270 vqpi->rxq.databuffer_size = adapter->rx_rings[i]->rx_buf_len; 271 vqpi++; 272 } 273 274 adapter->aq_required &= ~I40EVF_FLAG_AQ_CONFIGURE_QUEUES; 275 i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES, 276 (u8 *)vqci, len); 277 kfree(vqci); 278} 279 280/** 281 * i40evf_enable_queues 282 * @adapter: adapter structure 283 * 284 * Request that the PF enable all of our queues. 285 **/ 286void i40evf_enable_queues(struct i40evf_adapter *adapter) 287{ 288 struct i40e_virtchnl_queue_select vqs; 289 290 if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) { 291 /* bail because we already have a command pending */ 292 dev_err(&adapter->pdev->dev, "Cannot enable queues, command %d pending\n", 293 adapter->current_op); 294 return; 295 } 296 adapter->current_op = I40E_VIRTCHNL_OP_ENABLE_QUEUES; 297 vqs.vsi_id = adapter->vsi_res->vsi_id; 298 vqs.tx_queues = BIT(adapter->num_active_queues) - 1; 299 vqs.rx_queues = vqs.tx_queues; 300 adapter->aq_required &= ~I40EVF_FLAG_AQ_ENABLE_QUEUES; 301 i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_ENABLE_QUEUES, 302 (u8 *)&vqs, sizeof(vqs)); 303} 304 305/** 306 * i40evf_disable_queues 307 * @adapter: adapter structure 308 * 309 * Request that the PF disable all of our queues. 
310 **/ 311void i40evf_disable_queues(struct i40evf_adapter *adapter) 312{ 313 struct i40e_virtchnl_queue_select vqs; 314 315 if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) { 316 /* bail because we already have a command pending */ 317 dev_err(&adapter->pdev->dev, "Cannot disable queues, command %d pending\n", 318 adapter->current_op); 319 return; 320 } 321 adapter->current_op = I40E_VIRTCHNL_OP_DISABLE_QUEUES; 322 vqs.vsi_id = adapter->vsi_res->vsi_id; 323 vqs.tx_queues = BIT(adapter->num_active_queues) - 1; 324 vqs.rx_queues = vqs.tx_queues; 325 adapter->aq_required &= ~I40EVF_FLAG_AQ_DISABLE_QUEUES; 326 i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_DISABLE_QUEUES, 327 (u8 *)&vqs, sizeof(vqs)); 328} 329 330/** 331 * i40evf_map_queues 332 * @adapter: adapter structure 333 * 334 * Request that the PF map queues to interrupt vectors. Misc causes, including 335 * admin queue, are always mapped to vector 0. 336 **/ 337void i40evf_map_queues(struct i40evf_adapter *adapter) 338{ 339 struct i40e_virtchnl_irq_map_info *vimi; 340 int v_idx, q_vectors, len; 341 struct i40e_q_vector *q_vector; 342 343 if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) { 344 /* bail because we already have a command pending */ 345 dev_err(&adapter->pdev->dev, "Cannot map queues to vectors, command %d pending\n", 346 adapter->current_op); 347 return; 348 } 349 adapter->current_op = I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP; 350 351 q_vectors = adapter->num_msix_vectors - NONQ_VECS; 352 353 len = sizeof(struct i40e_virtchnl_irq_map_info) + 354 (adapter->num_msix_vectors * 355 sizeof(struct i40e_virtchnl_vector_map)); 356 vimi = kzalloc(len, GFP_ATOMIC); 357 if (!vimi) 358 return; 359 360 vimi->num_vectors = adapter->num_msix_vectors; 361 /* Queue vectors first */ 362 for (v_idx = 0; v_idx < q_vectors; v_idx++) { 363 q_vector = adapter->q_vector[v_idx]; 364 vimi->vecmap[v_idx].vsi_id = adapter->vsi_res->vsi_id; 365 vimi->vecmap[v_idx].vector_id = v_idx + NONQ_VECS; 366 vimi->vecmap[v_idx].txq_map = 
q_vector->ring_mask; 367 vimi->vecmap[v_idx].rxq_map = q_vector->ring_mask; 368 } 369 /* Misc vector last - this is only for AdminQ messages */ 370 vimi->vecmap[v_idx].vsi_id = adapter->vsi_res->vsi_id; 371 vimi->vecmap[v_idx].vector_id = 0; 372 vimi->vecmap[v_idx].txq_map = 0; 373 vimi->vecmap[v_idx].rxq_map = 0; 374 375 adapter->aq_required &= ~I40EVF_FLAG_AQ_MAP_VECTORS; 376 i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP, 377 (u8 *)vimi, len); 378 kfree(vimi); 379} 380 381/** 382 * i40evf_add_ether_addrs 383 * @adapter: adapter structure 384 * @addrs: the MAC address filters to add (contiguous) 385 * @count: number of filters 386 * 387 * Request that the PF add one or more addresses to our filters. 388 **/ 389void i40evf_add_ether_addrs(struct i40evf_adapter *adapter) 390{ 391 struct i40e_virtchnl_ether_addr_list *veal; 392 int len, i = 0, count = 0; 393 struct i40evf_mac_filter *f; 394 395 if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) { 396 /* bail because we already have a command pending */ 397 dev_err(&adapter->pdev->dev, "Cannot add filters, command %d pending\n", 398 adapter->current_op); 399 return; 400 } 401 list_for_each_entry(f, &adapter->mac_filter_list, list) { 402 if (f->add) 403 count++; 404 } 405 if (!count) { 406 adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_MAC_FILTER; 407 return; 408 } 409 adapter->current_op = I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS; 410 411 len = sizeof(struct i40e_virtchnl_ether_addr_list) + 412 (count * sizeof(struct i40e_virtchnl_ether_addr)); 413 if (len > I40EVF_MAX_AQ_BUF_SIZE) { 414 dev_warn(&adapter->pdev->dev, "Too many add MAC changes in one request\n"); 415 count = (I40EVF_MAX_AQ_BUF_SIZE - 416 sizeof(struct i40e_virtchnl_ether_addr_list)) / 417 sizeof(struct i40e_virtchnl_ether_addr); 418 len = I40EVF_MAX_AQ_BUF_SIZE; 419 } 420 421 veal = kzalloc(len, GFP_ATOMIC); 422 if (!veal) 423 return; 424 425 veal->vsi_id = adapter->vsi_res->vsi_id; 426 veal->num_elements = count; 427 list_for_each_entry(f, 
&adapter->mac_filter_list, list) { 428 if (f->add) { 429 ether_addr_copy(veal->list[i].addr, f->macaddr); 430 i++; 431 f->add = false; 432 } 433 } 434 adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_MAC_FILTER; 435 i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS, 436 (u8 *)veal, len); 437 kfree(veal); 438} 439 440/** 441 * i40evf_del_ether_addrs 442 * @adapter: adapter structure 443 * @addrs: the MAC address filters to remove (contiguous) 444 * @count: number of filtes 445 * 446 * Request that the PF remove one or more addresses from our filters. 447 **/ 448void i40evf_del_ether_addrs(struct i40evf_adapter *adapter) 449{ 450 struct i40e_virtchnl_ether_addr_list *veal; 451 struct i40evf_mac_filter *f, *ftmp; 452 int len, i = 0, count = 0; 453 454 if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) { 455 /* bail because we already have a command pending */ 456 dev_err(&adapter->pdev->dev, "Cannot remove filters, command %d pending\n", 457 adapter->current_op); 458 return; 459 } 460 list_for_each_entry(f, &adapter->mac_filter_list, list) { 461 if (f->remove) 462 count++; 463 } 464 if (!count) { 465 adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_MAC_FILTER; 466 return; 467 } 468 adapter->current_op = I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS; 469 470 len = sizeof(struct i40e_virtchnl_ether_addr_list) + 471 (count * sizeof(struct i40e_virtchnl_ether_addr)); 472 if (len > I40EVF_MAX_AQ_BUF_SIZE) { 473 dev_warn(&adapter->pdev->dev, "Too many delete MAC changes in one request\n"); 474 count = (I40EVF_MAX_AQ_BUF_SIZE - 475 sizeof(struct i40e_virtchnl_ether_addr_list)) / 476 sizeof(struct i40e_virtchnl_ether_addr); 477 len = I40EVF_MAX_AQ_BUF_SIZE; 478 } 479 veal = kzalloc(len, GFP_ATOMIC); 480 if (!veal) 481 return; 482 483 veal->vsi_id = adapter->vsi_res->vsi_id; 484 veal->num_elements = count; 485 list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) { 486 if (f->remove) { 487 ether_addr_copy(veal->list[i].addr, f->macaddr); 488 i++; 489 list_del(&f->list); 
490 kfree(f); 491 } 492 } 493 adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_MAC_FILTER; 494 i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS, 495 (u8 *)veal, len); 496 kfree(veal); 497} 498 499/** 500 * i40evf_add_vlans 501 * @adapter: adapter structure 502 * @vlans: the VLANs to add 503 * @count: number of VLANs 504 * 505 * Request that the PF add one or more VLAN filters to our VSI. 506 **/ 507void i40evf_add_vlans(struct i40evf_adapter *adapter) 508{ 509 struct i40e_virtchnl_vlan_filter_list *vvfl; 510 int len, i = 0, count = 0; 511 struct i40evf_vlan_filter *f; 512 513 if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) { 514 /* bail because we already have a command pending */ 515 dev_err(&adapter->pdev->dev, "Cannot add VLANs, command %d pending\n", 516 adapter->current_op); 517 return; 518 } 519 520 list_for_each_entry(f, &adapter->vlan_filter_list, list) { 521 if (f->add) 522 count++; 523 } 524 if (!count) { 525 adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_VLAN_FILTER; 526 return; 527 } 528 adapter->current_op = I40E_VIRTCHNL_OP_ADD_VLAN; 529 530 len = sizeof(struct i40e_virtchnl_vlan_filter_list) + 531 (count * sizeof(u16)); 532 if (len > I40EVF_MAX_AQ_BUF_SIZE) { 533 dev_warn(&adapter->pdev->dev, "Too many add VLAN changes in one request\n"); 534 count = (I40EVF_MAX_AQ_BUF_SIZE - 535 sizeof(struct i40e_virtchnl_vlan_filter_list)) / 536 sizeof(u16); 537 len = I40EVF_MAX_AQ_BUF_SIZE; 538 } 539 vvfl = kzalloc(len, GFP_ATOMIC); 540 if (!vvfl) 541 return; 542 543 vvfl->vsi_id = adapter->vsi_res->vsi_id; 544 vvfl->num_elements = count; 545 list_for_each_entry(f, &adapter->vlan_filter_list, list) { 546 if (f->add) { 547 vvfl->vlan_id[i] = f->vlan; 548 i++; 549 f->add = false; 550 } 551 } 552 adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_VLAN_FILTER; 553 i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_ADD_VLAN, (u8 *)vvfl, len); 554 kfree(vvfl); 555} 556 557/** 558 * i40evf_del_vlans 559 * @adapter: adapter structure 560 * @vlans: the VLANs to remove 561 * 
@count: number of VLANs 562 * 563 * Request that the PF remove one or more VLAN filters from our VSI. 564 **/ 565void i40evf_del_vlans(struct i40evf_adapter *adapter) 566{ 567 struct i40e_virtchnl_vlan_filter_list *vvfl; 568 struct i40evf_vlan_filter *f, *ftmp; 569 int len, i = 0, count = 0; 570 571 if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) { 572 /* bail because we already have a command pending */ 573 dev_err(&adapter->pdev->dev, "Cannot remove VLANs, command %d pending\n", 574 adapter->current_op); 575 return; 576 } 577 578 list_for_each_entry(f, &adapter->vlan_filter_list, list) { 579 if (f->remove) 580 count++; 581 } 582 if (!count) { 583 adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_VLAN_FILTER; 584 return; 585 } 586 adapter->current_op = I40E_VIRTCHNL_OP_DEL_VLAN; 587 588 len = sizeof(struct i40e_virtchnl_vlan_filter_list) + 589 (count * sizeof(u16)); 590 if (len > I40EVF_MAX_AQ_BUF_SIZE) { 591 dev_warn(&adapter->pdev->dev, "Too many delete VLAN changes in one request\n"); 592 count = (I40EVF_MAX_AQ_BUF_SIZE - 593 sizeof(struct i40e_virtchnl_vlan_filter_list)) / 594 sizeof(u16); 595 len = I40EVF_MAX_AQ_BUF_SIZE; 596 } 597 vvfl = kzalloc(len, GFP_ATOMIC); 598 if (!vvfl) 599 return; 600 601 vvfl->vsi_id = adapter->vsi_res->vsi_id; 602 vvfl->num_elements = count; 603 list_for_each_entry_safe(f, ftmp, &adapter->vlan_filter_list, list) { 604 if (f->remove) { 605 vvfl->vlan_id[i] = f->vlan; 606 i++; 607 list_del(&f->list); 608 kfree(f); 609 } 610 } 611 adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_VLAN_FILTER; 612 i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_DEL_VLAN, (u8 *)vvfl, len); 613 kfree(vvfl); 614} 615 616/** 617 * i40evf_set_promiscuous 618 * @adapter: adapter structure 619 * @flags: bitmask to control unicast/multicast promiscuous. 620 * 621 * Request that the PF enable promiscuous mode for our VSI. 
622 **/ 623void i40evf_set_promiscuous(struct i40evf_adapter *adapter, int flags) 624{ 625 struct i40e_virtchnl_promisc_info vpi; 626 627 if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) { 628 /* bail because we already have a command pending */ 629 dev_err(&adapter->pdev->dev, "Cannot set promiscuous mode, command %d pending\n", 630 adapter->current_op); 631 return; 632 } 633 adapter->current_op = I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE; 634 vpi.vsi_id = adapter->vsi_res->vsi_id; 635 vpi.flags = flags; 636 i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, 637 (u8 *)&vpi, sizeof(vpi)); 638} 639 640/** 641 * i40evf_request_stats 642 * @adapter: adapter structure 643 * 644 * Request VSI statistics from PF. 645 **/ 646void i40evf_request_stats(struct i40evf_adapter *adapter) 647{ 648 struct i40e_virtchnl_queue_select vqs; 649 650 if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) { 651 /* no error message, this isn't crucial */ 652 return; 653 } 654 adapter->current_op = I40E_VIRTCHNL_OP_GET_STATS; 655 vqs.vsi_id = adapter->vsi_res->vsi_id; 656 /* queue maps are ignored for this message - only the vsi is used */ 657 if (i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_GET_STATS, 658 (u8 *)&vqs, sizeof(vqs))) 659 /* if the request failed, don't lock out others */ 660 adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN; 661} 662/** 663 * i40evf_request_reset 664 * @adapter: adapter structure 665 * 666 * Request that the PF reset this VF. No response is expected. 
 **/
void i40evf_request_reset(struct i40evf_adapter *adapter)
{
	/* Don't check CURRENT_OP - this is always higher priority */
	i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_RESET_VF, NULL, 0);
	/* no reply is expected, so release the pending-op slot at once */
	adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
}

/**
 * i40evf_virtchnl_completion
 * @adapter: adapter structure
 * @v_opcode: opcode sent by PF
 * @v_retval: retval sent by PF
 * @msg: message sent by PF
 * @msglen: message length
 *
 * Asynchronous completion function for admin queue messages. Rather than busy
 * wait, we fire off our requests and assume that no errors will be returned.
 * This function handles the reply messages.
 **/
void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
				enum i40e_virtchnl_ops v_opcode,
				i40e_status v_retval,
				u8 *msg, u16 msglen)
{
	struct net_device *netdev = adapter->netdev;

	/* Unsolicited PF events (link change, reset warning) are handled
	 * first and do not touch current_op.
	 */
	if (v_opcode == I40E_VIRTCHNL_OP_EVENT) {
		struct i40e_virtchnl_pf_event *vpe =
			(struct i40e_virtchnl_pf_event *)msg;
		switch (vpe->event) {
		case I40E_VIRTCHNL_EVENT_LINK_CHANGE:
			adapter->link_up =
				vpe->event_data.link_event.link_status;
			/* only log and toggle carrier on a real transition */
			if (adapter->link_up && !netif_carrier_ok(netdev)) {
				dev_info(&adapter->pdev->dev, "NIC Link is Up\n");
				netif_carrier_on(netdev);
				netif_tx_wake_all_queues(netdev);
			} else if (!adapter->link_up) {
				dev_info(&adapter->pdev->dev, "NIC Link is Down\n");
				netif_carrier_off(netdev);
				netif_tx_stop_all_queues(netdev);
			}
			break;
		case I40E_VIRTCHNL_EVENT_RESET_IMPENDING:
			dev_info(&adapter->pdev->dev, "PF reset warning received\n");
			/* schedule the reset task only once per reset cycle */
			if (!(adapter->flags & I40EVF_FLAG_RESET_PENDING)) {
				adapter->flags |= I40EVF_FLAG_RESET_PENDING;
				dev_info(&adapter->pdev->dev, "Scheduling reset task\n");
				schedule_work(&adapter->reset_task);
			}
			break;
		default:
			dev_err(&adapter->pdev->dev, "Unknown event %d from PF\n",
				vpe->event);
			break;
		}
		return;
	}
	if (v_retval) {
		dev_err(&adapter->pdev->dev, "PF returned error %d (%s) to our request %d\n",
			v_retval, i40evf_stat_str(&adapter->hw, v_retval),
			v_opcode);
	}
	switch (v_opcode) {
	case I40E_VIRTCHNL_OP_GET_STATS: {
		struct i40e_eth_stats *stats =
			(struct i40e_eth_stats *)msg;
		/* fold uni/multi/broadcast counters into packet totals */
		adapter->net_stats.rx_packets = stats->rx_unicast +
						stats->rx_multicast +
						stats->rx_broadcast;
		adapter->net_stats.tx_packets = stats->tx_unicast +
						stats->tx_multicast +
						stats->tx_broadcast;
		adapter->net_stats.rx_bytes = stats->rx_bytes;
		adapter->net_stats.tx_bytes = stats->tx_bytes;
		adapter->net_stats.tx_errors = stats->tx_errors;
		adapter->net_stats.rx_dropped = stats->rx_discards;
		adapter->net_stats.tx_dropped = stats->tx_discards;
		adapter->current_stats = *stats;
		}
		break;
	case I40E_VIRTCHNL_OP_GET_VF_RESOURCES: {
		/* bound the copy by both the message and our buffer size */
		u16 len = sizeof(struct i40e_virtchnl_vf_resource) +
			  I40E_MAX_VF_VSI *
			  sizeof(struct i40e_virtchnl_vsi_resource);
		memcpy(adapter->vf_res, msg, min(msglen, len));
		i40e_vf_parse_hw_config(&adapter->hw, adapter->vf_res);
		/* restore current mac address */
		ether_addr_copy(adapter->hw.mac.addr, netdev->dev_addr);
		i40evf_process_config(adapter);
		}
		break;
	case I40E_VIRTCHNL_OP_ENABLE_QUEUES:
		/* enable transmits */
		i40evf_irq_enable(adapter, true);
		netif_tx_start_all_queues(adapter->netdev);
		netif_carrier_on(adapter->netdev);
		break;
	case I40E_VIRTCHNL_OP_DISABLE_QUEUES:
		i40evf_free_all_tx_resources(adapter);
		i40evf_free_all_rx_resources(adapter);
		break;
	case I40E_VIRTCHNL_OP_VERSION:
	case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP:
		/* Don't display an error if we get these out of sequence.
		 * If the firmware needed to get kicked, we'll get these and
		 * it's no problem.
		 */
		if (v_opcode != adapter->current_op)
			return;
		break;
	default:
		if (v_opcode != adapter->current_op)
			dev_warn(&adapter->pdev->dev, "Expected response %d from PF, received %d\n",
				 adapter->current_op, v_opcode);
		break;
	} /* switch v_opcode */
	/* command completed (or mismatched); allow the next AQ request */
	adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
}