1/******************************************************************************* 2 * 3 * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver 4 * Copyright(c) 2013 - 2014 Intel Corporation. 5 * 6 * This program is free software; you can redistribute it and/or modify it 7 * under the terms and conditions of the GNU General Public License, 8 * version 2, as published by the Free Software Foundation. 9 * 10 * This program is distributed in the hope it will be useful, but WITHOUT 11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 13 * more details. 14 * 15 * You should have received a copy of the GNU General Public License along 16 * with this program. If not, see <http://www.gnu.org/licenses/>. 17 * 18 * The full GNU General Public License is included in this distribution in 19 * the file called "COPYING". 20 * 21 * Contact Information: 22 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> 23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 24 * 25 ******************************************************************************/ 26 27#include "i40e_status.h" 28#include "i40e_type.h" 29#include "i40e_register.h" 30#include "i40e_adminq.h" 31#include "i40e_prototype.h" 32 33/** 34 * i40e_is_nvm_update_op - return true if this is an NVM update operation 35 * @desc: API request descriptor 36 **/ 37static inline bool i40e_is_nvm_update_op(struct i40e_aq_desc *desc) 38{ 39 return (desc->opcode == i40e_aqc_opc_nvm_erase) || 40 (desc->opcode == i40e_aqc_opc_nvm_update); 41} 42 43/** 44 * i40e_adminq_init_regs - Initialize AdminQ registers 45 * @hw: pointer to the hardware structure 46 * 47 * This assumes the alloc_asq and alloc_arq functions have already been called 48 **/ 49static void i40e_adminq_init_regs(struct i40e_hw *hw) 50{ 51 /* set head and tail registers in our local struct */ 52 if (i40e_is_vf(hw)) { 53 hw->aq.asq.tail = I40E_VF_ATQT1; 54 hw->aq.asq.head = I40E_VF_ATQH1; 55 hw->aq.asq.len = I40E_VF_ATQLEN1; 56 hw->aq.asq.bal = I40E_VF_ATQBAL1; 57 hw->aq.asq.bah = I40E_VF_ATQBAH1; 58 hw->aq.arq.tail = I40E_VF_ARQT1; 59 hw->aq.arq.head = I40E_VF_ARQH1; 60 hw->aq.arq.len = I40E_VF_ARQLEN1; 61 hw->aq.arq.bal = I40E_VF_ARQBAL1; 62 hw->aq.arq.bah = I40E_VF_ARQBAH1; 63 } else { 64 hw->aq.asq.tail = I40E_PF_ATQT; 65 hw->aq.asq.head = I40E_PF_ATQH; 66 hw->aq.asq.len = I40E_PF_ATQLEN; 67 hw->aq.asq.bal = I40E_PF_ATQBAL; 68 hw->aq.asq.bah = I40E_PF_ATQBAH; 69 hw->aq.arq.tail = I40E_PF_ARQT; 70 hw->aq.arq.head = I40E_PF_ARQH; 71 hw->aq.arq.len = I40E_PF_ARQLEN; 72 hw->aq.arq.bal = I40E_PF_ARQBAL; 73 hw->aq.arq.bah = I40E_PF_ARQBAH; 74 } 75} 76 77/** 78 * i40e_alloc_adminq_asq_ring - Allocate Admin Queue send rings 79 * @hw: pointer to the hardware structure 80 **/ 81static i40e_status i40e_alloc_adminq_asq_ring(struct i40e_hw *hw) 82{ 83 i40e_status ret_code; 84 85 ret_code = i40e_allocate_dma_mem(hw, &hw->aq.asq.desc_buf, 86 i40e_mem_atq_ring, 87 (hw->aq.num_asq_entries * 88 sizeof(struct i40e_aq_desc)), 89 I40E_ADMINQ_DESC_ALIGNMENT); 90 if (ret_code) 91 return ret_code; 92 93 ret_code = i40e_allocate_virt_mem(hw, &hw->aq.asq.cmd_buf, 94 (hw->aq.num_asq_entries * 95 sizeof(struct i40e_asq_cmd_details))); 96 if (ret_code) { 97 i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf); 98 return ret_code; 99 } 100 101 return ret_code; 102} 103 104/** 105 * i40e_alloc_adminq_arq_ring - Allocate Admin Queue receive rings 106 * @hw: pointer to the hardware structure 107 **/ 108static i40e_status i40e_alloc_adminq_arq_ring(struct i40e_hw *hw) 109{ 110 i40e_status ret_code; 111 112 ret_code = i40e_allocate_dma_mem(hw, &hw->aq.arq.desc_buf, 113 i40e_mem_arq_ring, 114 (hw->aq.num_arq_entries * 115 sizeof(struct i40e_aq_desc)), 116 I40E_ADMINQ_DESC_ALIGNMENT); 117 118 return ret_code; 119} 120 121/** 122 * i40e_free_adminq_asq - Free Admin Queue send rings 123 * @hw: pointer to the hardware structure 124 * 125 * This assumes the posted send buffers have already been cleaned 126 * and de-allocated 127 **/ 128static void i40e_free_adminq_asq(struct i40e_hw *hw) 129{ 130 i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf); 131} 132 133/** 134 * i40e_free_adminq_arq - Free Admin Queue receive rings 135 * @hw: pointer to the hardware structure 136 * 137 * This assumes the posted receive buffers have already been cleaned 138 * and de-allocated 139 **/ 140static void i40e_free_adminq_arq(struct i40e_hw *hw) 141{ 142 i40e_free_dma_mem(hw, &hw->aq.arq.desc_buf); 143} 144 145/** 146 * i40e_alloc_arq_bufs - Allocate pre-posted buffers for the receive queue 147 * @hw: pointer to the hardware structure 148 **/ 149static i40e_status i40e_alloc_arq_bufs(struct i40e_hw *hw) 150{ 151 i40e_status ret_code; 152 struct i40e_aq_desc *desc; 153 struct i40e_dma_mem *bi; 154 int i; 155 156 /* We'll be allocating the buffer info memory first, then we can 157 * allocate the mapped buffers for the event processing 158 */ 159 160 /* buffer_info structures do not need alignment */ 161 ret_code = i40e_allocate_virt_mem(hw, &hw->aq.arq.dma_head, 162 (hw->aq.num_arq_entries * sizeof(struct i40e_dma_mem))); 163 if (ret_code) 164 goto alloc_arq_bufs; 165 hw->aq.arq.r.arq_bi = (struct i40e_dma_mem *)hw->aq.arq.dma_head.va; 166 167 /* allocate the mapped buffers */ 168 for (i = 0; i < hw->aq.num_arq_entries; i++) { 169 bi = &hw->aq.arq.r.arq_bi[i]; 170 ret_code = i40e_allocate_dma_mem(hw, bi, 171 i40e_mem_arq_buf, 172 hw->aq.arq_buf_size, 173 I40E_ADMINQ_DESC_ALIGNMENT); 174 if (ret_code) 175 goto unwind_alloc_arq_bufs; 176 177 /* now configure the descriptors for use */ 178 desc = I40E_ADMINQ_DESC(hw->aq.arq, i); 179 180 desc->flags = cpu_to_le16(I40E_AQ_FLAG_BUF); 181 if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF) 182 desc->flags |= cpu_to_le16(I40E_AQ_FLAG_LB); 183 desc->opcode = 0; 184 /* This is in accordance with Admin queue design, there is no 185 * register for buffer size configuration 186 */ 187 desc->datalen = cpu_to_le16((u16)bi->size); 188 desc->retval = 0; 189 desc->cookie_high = 0; 190 desc->cookie_low = 0; 191 desc->params.external.addr_high = 192 cpu_to_le32(upper_32_bits(bi->pa)); 193 desc->params.external.addr_low = 194 cpu_to_le32(lower_32_bits(bi->pa)); 195 desc->params.external.param0 = 0; 196 desc->params.external.param1 = 0; 197 } 198 199alloc_arq_bufs: 200 return ret_code; 201 202unwind_alloc_arq_bufs: 203 /* don't try to free the one that failed... */ 204 i--; 205 for (; i >= 0; i--) 206 i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]); 207 i40e_free_virt_mem(hw, &hw->aq.arq.dma_head); 208 209 return ret_code; 210} 211 212/** 213 * i40e_alloc_asq_bufs - Allocate empty buffer structs for the send queue 214 * @hw: pointer to the hardware structure 215 **/ 216static i40e_status i40e_alloc_asq_bufs(struct i40e_hw *hw) 217{ 218 i40e_status ret_code; 219 struct i40e_dma_mem *bi; 220 int i; 221 222 /* No mapped memory needed yet, just the buffer info structures */ 223 ret_code = i40e_allocate_virt_mem(hw, &hw->aq.asq.dma_head, 224 (hw->aq.num_asq_entries * sizeof(struct i40e_dma_mem))); 225 if (ret_code) 226 goto alloc_asq_bufs; 227 hw->aq.asq.r.asq_bi = (struct i40e_dma_mem *)hw->aq.asq.dma_head.va; 228 229 /* allocate the mapped buffers */ 230 for (i = 0; i < hw->aq.num_asq_entries; i++) { 231 bi = &hw->aq.asq.r.asq_bi[i]; 232 ret_code = i40e_allocate_dma_mem(hw, bi, 233 i40e_mem_asq_buf, 234 hw->aq.asq_buf_size, 235 I40E_ADMINQ_DESC_ALIGNMENT); 236 if (ret_code) 237 goto unwind_alloc_asq_bufs; 238 } 239alloc_asq_bufs: 240 return ret_code; 241 242unwind_alloc_asq_bufs: 243 /* don't try to free the one that failed... */ 244 i--; 245 for (; i >= 0; i--) 246 i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]); 247 i40e_free_virt_mem(hw, &hw->aq.asq.dma_head); 248 249 return ret_code; 250} 251 252/** 253 * i40e_free_arq_bufs - Free receive queue buffer info elements 254 * @hw: pointer to the hardware structure 255 **/ 256static void i40e_free_arq_bufs(struct i40e_hw *hw) 257{ 258 int i; 259 260 /* free descriptors */ 261 for (i = 0; i < hw->aq.num_arq_entries; i++) 262 i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]); 263 264 /* free the descriptor memory */ 265 i40e_free_dma_mem(hw, &hw->aq.arq.desc_buf); 266 267 /* free the dma header */ 268 i40e_free_virt_mem(hw, &hw->aq.arq.dma_head); 269} 270 271/** 272 * i40e_free_asq_bufs - Free send queue buffer info elements 273 * @hw: pointer to the hardware structure 274 **/ 275static void i40e_free_asq_bufs(struct i40e_hw *hw) 276{ 277 int i; 278 279 /* only unmap if the address is non-NULL */ 280 for (i = 0; i < hw->aq.num_asq_entries; i++) 281 if (hw->aq.asq.r.asq_bi[i].pa) 282 i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]); 283 284 /* free the buffer info list */ 285 i40e_free_virt_mem(hw, &hw->aq.asq.cmd_buf); 286 287 /* free the descriptor memory */ 288 i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf); 289 290 /* free the dma header */ 291 i40e_free_virt_mem(hw, &hw->aq.asq.dma_head); 292} 293 294/** 295 * i40e_config_asq_regs - configure ASQ registers 296 * @hw: pointer to the hardware structure 297 * 298 * Configure base address and length registers for the transmit queue 299 **/ 300static i40e_status i40e_config_asq_regs(struct i40e_hw *hw) 301{ 302 i40e_status ret_code = 0; 303 u32 reg = 0; 304 305 /* Clear Head and Tail */ 306 wr32(hw, hw->aq.asq.head, 0); 307 wr32(hw, hw->aq.asq.tail, 0); 308 309 /* set starting point */ 310 wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries | 311 I40E_PF_ATQLEN_ATQENABLE_MASK)); 312 wr32(hw, hw->aq.asq.bal, lower_32_bits(hw->aq.asq.desc_buf.pa)); 313 wr32(hw, hw->aq.asq.bah, upper_32_bits(hw->aq.asq.desc_buf.pa)); 314 315 /* Check one register to verify that config was applied */ 316 reg = rd32(hw, hw->aq.asq.bal); 317 if (reg != lower_32_bits(hw->aq.asq.desc_buf.pa)) 318 ret_code = I40E_ERR_ADMIN_QUEUE_ERROR; 319 320 return ret_code; 321} 322 323/** 324 * i40e_config_arq_regs - ARQ register configuration 325 * @hw: pointer to the hardware structure 326 * 327 * Configure base address and length registers for the receive (event queue) 328 **/ 329static i40e_status i40e_config_arq_regs(struct i40e_hw *hw) 330{ 331 i40e_status ret_code = 0; 332 u32 reg = 0; 333 334 /* Clear Head and Tail */ 335 wr32(hw, hw->aq.arq.head, 0); 336 wr32(hw, hw->aq.arq.tail, 0); 337 338 /* set starting point */ 339 wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries | 340 I40E_PF_ARQLEN_ARQENABLE_MASK)); 341 wr32(hw, hw->aq.arq.bal, lower_32_bits(hw->aq.arq.desc_buf.pa)); 342 wr32(hw, hw->aq.arq.bah, upper_32_bits(hw->aq.arq.desc_buf.pa)); 343 344 /* Update tail in the HW to post pre-allocated buffers */ 345 wr32(hw, hw->aq.arq.tail, hw->aq.num_arq_entries - 1); 346 347 /* Check one register to verify that config was applied */ 348 reg = rd32(hw, hw->aq.arq.bal); 349 if (reg != lower_32_bits(hw->aq.arq.desc_buf.pa)) 350 ret_code = I40E_ERR_ADMIN_QUEUE_ERROR; 351 352 return ret_code; 353} 354 355/** 356 * i40e_init_asq - main initialization routine for ASQ 357 * @hw: pointer to the hardware structure 358 * 359 * This is the main initialization routine for the Admin Send Queue 360 * Prior to calling this function, drivers *MUST* set the following fields 361 * in the hw->aq structure: 362 * - hw->aq.num_asq_entries 363 * - hw->aq.arq_buf_size 364 * 365 * Do *NOT* hold the lock when calling this as the memory allocation routines 366 * called are not going to be atomic context safe 367 **/ 368static i40e_status i40e_init_asq(struct i40e_hw *hw) 369{ 370 i40e_status ret_code = 0; 371 372 if (hw->aq.asq.count > 0) { 373 /* queue already initialized */ 374 ret_code = I40E_ERR_NOT_READY; 375 goto init_adminq_exit; 376 } 377 378 /* verify input for valid configuration */ 379 if ((hw->aq.num_asq_entries == 0) || 380 (hw->aq.asq_buf_size == 0)) { 381 ret_code = I40E_ERR_CONFIG; 382 goto init_adminq_exit; 383 } 384 385 hw->aq.asq.next_to_use = 0; 386 hw->aq.asq.next_to_clean = 0; 387 hw->aq.asq.count = hw->aq.num_asq_entries; 388 389 /* allocate the ring memory */ 390 ret_code = i40e_alloc_adminq_asq_ring(hw); 391 if (ret_code) 392 goto init_adminq_exit; 393 394 /* allocate buffers in the rings */ 395 ret_code = i40e_alloc_asq_bufs(hw); 396 if (ret_code) 397 goto init_adminq_free_rings; 398 399 /* initialize base registers */ 400 ret_code = i40e_config_asq_regs(hw); 401 if (ret_code) 402 goto init_adminq_free_rings; 403 404 /* success! */ 405 goto init_adminq_exit; 406 407init_adminq_free_rings: 408 i40e_free_adminq_asq(hw); 409 410init_adminq_exit: 411 return ret_code; 412} 413 414/** 415 * i40e_init_arq - initialize ARQ 416 * @hw: pointer to the hardware structure 417 * 418 * The main initialization routine for the Admin Receive (Event) Queue. 419 * Prior to calling this function, drivers *MUST* set the following fields 420 * in the hw->aq structure: 421 * - hw->aq.num_asq_entries 422 * - hw->aq.arq_buf_size 423 * 424 * Do *NOT* hold the lock when calling this as the memory allocation routines 425 * called are not going to be atomic context safe 426 **/ 427static i40e_status i40e_init_arq(struct i40e_hw *hw) 428{ 429 i40e_status ret_code = 0; 430 431 if (hw->aq.arq.count > 0) { 432 /* queue already initialized */ 433 ret_code = I40E_ERR_NOT_READY; 434 goto init_adminq_exit; 435 } 436 437 /* verify input for valid configuration */ 438 if ((hw->aq.num_arq_entries == 0) || 439 (hw->aq.arq_buf_size == 0)) { 440 ret_code = I40E_ERR_CONFIG; 441 goto init_adminq_exit; 442 } 443 444 hw->aq.arq.next_to_use = 0; 445 hw->aq.arq.next_to_clean = 0; 446 hw->aq.arq.count = hw->aq.num_arq_entries; 447 448 /* allocate the ring memory */ 449 ret_code = i40e_alloc_adminq_arq_ring(hw); 450 if (ret_code) 451 goto init_adminq_exit; 452 453 /* allocate buffers in the rings */ 454 ret_code = i40e_alloc_arq_bufs(hw); 455 if (ret_code) 456 goto init_adminq_free_rings; 457 458 /* initialize base registers */ 459 ret_code = i40e_config_arq_regs(hw); 460 if (ret_code) 461 goto init_adminq_free_rings; 462 463 /* success! */ 464 goto init_adminq_exit; 465 466init_adminq_free_rings: 467 i40e_free_adminq_arq(hw); 468 469init_adminq_exit: 470 return ret_code; 471} 472 473/** 474 * i40e_shutdown_asq - shutdown the ASQ 475 * @hw: pointer to the hardware structure 476 * 477 * The main shutdown routine for the Admin Send Queue 478 **/ 479static i40e_status i40e_shutdown_asq(struct i40e_hw *hw) 480{ 481 i40e_status ret_code = 0; 482 483 if (hw->aq.asq.count == 0) 484 return I40E_ERR_NOT_READY; 485 486 /* Stop firmware AdminQ processing */ 487 wr32(hw, hw->aq.asq.head, 0); 488 wr32(hw, hw->aq.asq.tail, 0); 489 wr32(hw, hw->aq.asq.len, 0); 490 wr32(hw, hw->aq.asq.bal, 0); 491 wr32(hw, hw->aq.asq.bah, 0); 492 493 /* make sure lock is available */ 494 mutex_lock(&hw->aq.asq_mutex); 495 496 hw->aq.asq.count = 0; /* to indicate uninitialized queue */ 497 498 /* free ring buffers */ 499 i40e_free_asq_bufs(hw); 500 501 mutex_unlock(&hw->aq.asq_mutex); 502 503 return ret_code; 504} 505 506/** 507 * i40e_shutdown_arq - shutdown ARQ 508 * @hw: pointer to the hardware structure 509 * 510 * The main shutdown routine for the Admin Receive Queue 511 **/ 512static i40e_status i40e_shutdown_arq(struct i40e_hw *hw) 513{ 514 i40e_status ret_code = 0; 515 516 if (hw->aq.arq.count == 0) 517 return I40E_ERR_NOT_READY; 518 519 /* Stop firmware AdminQ processing */ 520 wr32(hw, hw->aq.arq.head, 0); 521 wr32(hw, hw->aq.arq.tail, 0); 522 wr32(hw, hw->aq.arq.len, 0); 523 wr32(hw, hw->aq.arq.bal, 0); 524 wr32(hw, hw->aq.arq.bah, 0); 525 526 /* make sure lock is available */ 527 mutex_lock(&hw->aq.arq_mutex); 528 529 hw->aq.arq.count = 0; /* to indicate uninitialized queue */ 530 531 /* free ring buffers */ 532 i40e_free_arq_bufs(hw); 533 534 mutex_unlock(&hw->aq.arq_mutex); 535 536 return ret_code; 537} 538 539/** 540 * i40evf_init_adminq - main initialization routine for Admin Queue 541 * @hw: pointer to the hardware structure 542 * 543 * Prior to calling this function, drivers *MUST* set the following fields 544 * in the hw->aq structure: 545 * - hw->aq.num_asq_entries 546 * - hw->aq.num_arq_entries 547 * - hw->aq.arq_buf_size 548 * - hw->aq.asq_buf_size 549 **/ 550i40e_status i40evf_init_adminq(struct i40e_hw *hw) 551{ 552 i40e_status ret_code; 553 554 /* verify input for valid configuration */ 555 if ((hw->aq.num_arq_entries == 0) || 556 (hw->aq.num_asq_entries == 0) || 557 (hw->aq.arq_buf_size == 0) || 558 (hw->aq.asq_buf_size == 0)) { 559 ret_code = I40E_ERR_CONFIG; 560 goto init_adminq_exit; 561 } 562 563 /* initialize locks */ 564 mutex_init(&hw->aq.asq_mutex); 565 mutex_init(&hw->aq.arq_mutex); 566 567 /* Set up register offsets */ 568 i40e_adminq_init_regs(hw); 569 570 /* setup ASQ command write back timeout */ 571 hw->aq.asq_cmd_timeout = I40E_ASQ_CMD_TIMEOUT; 572 573 /* allocate the ASQ */ 574 ret_code = i40e_init_asq(hw); 575 if (ret_code) 576 goto init_adminq_destroy_locks; 577 578 /* allocate the ARQ */ 579 ret_code = i40e_init_arq(hw); 580 if (ret_code) 581 goto init_adminq_free_asq; 582 583 /* success! */ 584 goto init_adminq_exit; 585 586init_adminq_free_asq: 587 i40e_shutdown_asq(hw); 588init_adminq_destroy_locks: 589 590init_adminq_exit: 591 return ret_code; 592} 593 594/** 595 * i40evf_shutdown_adminq - shutdown routine for the Admin Queue 596 * @hw: pointer to the hardware structure 597 **/ 598i40e_status i40evf_shutdown_adminq(struct i40e_hw *hw) 599{ 600 i40e_status ret_code = 0; 601 602 if (i40evf_check_asq_alive(hw)) 603 i40evf_aq_queue_shutdown(hw, true); 604 605 i40e_shutdown_asq(hw); 606 i40e_shutdown_arq(hw); 607 608 /* destroy the locks */ 609 610 return ret_code; 611} 612 613/** 614 * i40e_clean_asq - cleans Admin send queue 615 * @hw: pointer to the hardware structure 616 * 617 * returns the number of free desc 618 **/ 619static u16 i40e_clean_asq(struct i40e_hw *hw) 620{ 621 struct i40e_adminq_ring *asq = &(hw->aq.asq); 622 struct i40e_asq_cmd_details *details; 623 u16 ntc = asq->next_to_clean; 624 struct i40e_aq_desc desc_cb; 625 struct i40e_aq_desc *desc; 626 627 desc = I40E_ADMINQ_DESC(*asq, ntc); 628 details = I40E_ADMINQ_DETAILS(*asq, ntc); 629 while (rd32(hw, hw->aq.asq.head) != ntc) { 630 i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, 631 "%s: ntc %d head %d.\n", __func__, ntc, 632 rd32(hw, hw->aq.asq.head)); 633 634 if (details->callback) { 635 I40E_ADMINQ_CALLBACK cb_func = 636 (I40E_ADMINQ_CALLBACK)details->callback; 637 desc_cb = *desc; 638 cb_func(hw, &desc_cb); 639 } 640 memset((void *)desc, 0, sizeof(struct i40e_aq_desc)); 641 memset((void *)details, 0, 642 sizeof(struct i40e_asq_cmd_details)); 643 ntc++; 644 if (ntc == asq->count) 645 ntc = 0; 646 desc = I40E_ADMINQ_DESC(*asq, ntc); 647 details = I40E_ADMINQ_DETAILS(*asq, ntc); 648 } 649 650 asq->next_to_clean = ntc; 651 652 return I40E_DESC_UNUSED(asq); 653} 654 655/** 656 * i40evf_asq_done - check if FW has processed the Admin Send Queue 657 * @hw: pointer to the hw struct 658 * 659 * Returns true if the firmware has processed all descriptors on the 660 * admin send queue. Returns false if there are still requests pending. 661 **/ 662bool i40evf_asq_done(struct i40e_hw *hw) 663{ 664 /* AQ designers suggest use of head for better 665 * timing reliability than DD bit 666 */ 667 return rd32(hw, hw->aq.asq.head) == hw->aq.asq.next_to_use; 668 669} 670 671/** 672 * i40evf_asq_send_command - send command to Admin Queue 673 * @hw: pointer to the hw struct 674 * @desc: prefilled descriptor describing the command (non DMA mem) 675 * @buff: buffer to use for indirect commands 676 * @buff_size: size of buffer for indirect commands 677 * @cmd_details: pointer to command details structure 678 * 679 * This is the main send command driver routine for the Admin Queue send 680 * queue. It runs the queue, cleans the queue, etc 681 **/ 682i40e_status i40evf_asq_send_command(struct i40e_hw *hw, 683 struct i40e_aq_desc *desc, 684 void *buff, /* can be NULL */ 685 u16 buff_size, 686 struct i40e_asq_cmd_details *cmd_details) 687{ 688 i40e_status status = 0; 689 struct i40e_dma_mem *dma_buff = NULL; 690 struct i40e_asq_cmd_details *details; 691 struct i40e_aq_desc *desc_on_ring; 692 bool cmd_completed = false; 693 u16 retval = 0; 694 u32 val = 0; 695 696 val = rd32(hw, hw->aq.asq.head); 697 if (val >= hw->aq.num_asq_entries) { 698 i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, 699 "AQTX: head overrun at %d\n", val); 700 status = I40E_ERR_QUEUE_EMPTY; 701 goto asq_send_command_exit; 702 } 703 704 if (hw->aq.asq.count == 0) { 705 i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, 706 "AQTX: Admin queue not initialized.\n"); 707 status = I40E_ERR_QUEUE_EMPTY; 708 goto asq_send_command_exit; 709 } 710 711 details = I40E_ADMINQ_DETAILS(hw->aq.asq, hw->aq.asq.next_to_use); 712 if (cmd_details) { 713 *details = *cmd_details; 714 715 /* If the cmd_details are defined copy the cookie. The 716 * cpu_to_le32 is not needed here because the data is ignored 717 * by the FW, only used by the driver 718 */ 719 if (details->cookie) { 720 desc->cookie_high = 721 cpu_to_le32(upper_32_bits(details->cookie)); 722 desc->cookie_low = 723 cpu_to_le32(lower_32_bits(details->cookie)); 724 } 725 } else { 726 memset(details, 0, sizeof(struct i40e_asq_cmd_details)); 727 } 728 729 /* clear requested flags and then set additional flags if defined */ 730 desc->flags &= ~cpu_to_le16(details->flags_dis); 731 desc->flags |= cpu_to_le16(details->flags_ena); 732 733 mutex_lock(&hw->aq.asq_mutex); 734 735 if (buff_size > hw->aq.asq_buf_size) { 736 i40e_debug(hw, 737 I40E_DEBUG_AQ_MESSAGE, 738 "AQTX: Invalid buffer size: %d.\n", 739 buff_size); 740 status = I40E_ERR_INVALID_SIZE; 741 goto asq_send_command_error; 742 } 743 744 if (details->postpone && !details->async) { 745 i40e_debug(hw, 746 I40E_DEBUG_AQ_MESSAGE, 747 "AQTX: Async flag not set along with postpone flag"); 748 status = I40E_ERR_PARAM; 749 goto asq_send_command_error; 750 } 751 752 /* call clean and check queue available function to reclaim the 753 * descriptors that were processed by FW, the function returns the 754 * number of desc available 755 */ 756 /* the clean function called here could be called in a separate thread 757 * in case of asynchronous completions 758 */ 759 if (i40e_clean_asq(hw) == 0) { 760 i40e_debug(hw, 761 I40E_DEBUG_AQ_MESSAGE, 762 "AQTX: Error queue is full.\n"); 763 status = I40E_ERR_ADMIN_QUEUE_FULL; 764 goto asq_send_command_error; 765 } 766 767 /* initialize the temp desc pointer with the right desc */ 768 desc_on_ring = I40E_ADMINQ_DESC(hw->aq.asq, hw->aq.asq.next_to_use); 769 770 /* if the desc is available copy the temp desc to the right place */ 771 *desc_on_ring = *desc; 772 773 /* if buff is not NULL assume indirect command */ 774 if (buff != NULL) { 775 dma_buff = &(hw->aq.asq.r.asq_bi[hw->aq.asq.next_to_use]); 776 /* copy the user buff into the respective DMA buff */ 777 memcpy(dma_buff->va, buff, buff_size); 778 desc_on_ring->datalen = cpu_to_le16(buff_size); 779 780 /* Update the address values in the desc with the pa value 781 * for respective buffer 782 */ 783 desc_on_ring->params.external.addr_high = 784 cpu_to_le32(upper_32_bits(dma_buff->pa)); 785 desc_on_ring->params.external.addr_low = 786 cpu_to_le32(lower_32_bits(dma_buff->pa)); 787 } 788 789 /* bump the tail */ 790 i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQTX: desc and buffer:\n"); 791 i40evf_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc_on_ring, 792 buff, buff_size); 793 (hw->aq.asq.next_to_use)++; 794 if (hw->aq.asq.next_to_use == hw->aq.asq.count) 795 hw->aq.asq.next_to_use = 0; 796 if (!details->postpone) 797 wr32(hw, hw->aq.asq.tail, hw->aq.asq.next_to_use); 798 799 /* if cmd_details are not defined or async flag is not set, 800 * we need to wait for desc write back 801 */ 802 if (!details->async && !details->postpone) { 803 u32 total_delay = 0; 804 805 do { 806 /* AQ designers suggest use of head for better 807 * timing reliability than DD bit 808 */ 809 if (i40evf_asq_done(hw)) 810 break; 811 usleep_range(1000, 2000); 812 total_delay++; 813 } while (total_delay < hw->aq.asq_cmd_timeout); 814 } 815 816 /* if ready, copy the desc back to temp */ 817 if (i40evf_asq_done(hw)) { 818 *desc = *desc_on_ring; 819 if (buff != NULL) 820 memcpy(buff, dma_buff->va, buff_size); 821 retval = le16_to_cpu(desc->retval); 822 if (retval != 0) { 823 i40e_debug(hw, 824 I40E_DEBUG_AQ_MESSAGE, 825 "AQTX: Command completed with error 0x%X.\n", 826 retval); 827 828 /* strip off FW internal code */ 829 retval &= 0xff; 830 } 831 cmd_completed = true; 832 if ((enum i40e_admin_queue_err)retval == I40E_AQ_RC_OK) 833 status = 0; 834 else 835 status = I40E_ERR_ADMIN_QUEUE_ERROR; 836 hw->aq.asq_last_status = (enum i40e_admin_queue_err)retval; 837 } 838 839 i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, 840 "AQTX: desc and buffer writeback:\n"); 841 i40evf_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, buff, 842 buff_size); 843 844 /* update the error if time out occurred */ 845 if ((!cmd_completed) && 846 (!details->async && !details->postpone)) { 847 i40e_debug(hw, 848 I40E_DEBUG_AQ_MESSAGE, 849 "AQTX: Writeback timeout.\n"); 850 status = I40E_ERR_ADMIN_QUEUE_TIMEOUT; 851 } 852 853asq_send_command_error: 854 mutex_unlock(&hw->aq.asq_mutex); 855asq_send_command_exit: 856 return status; 857} 858 859/** 860 * i40evf_fill_default_direct_cmd_desc - AQ descriptor helper function 861 * @desc: pointer to the temp descriptor (non DMA mem) 862 * @opcode: the opcode can be used to decide which flags to turn off or on 863 * 864 * Fill the desc with default values 865 **/ 866void i40evf_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc, 867 u16 opcode) 868{ 869 /* zero out the desc */ 870 memset((void *)desc, 0, sizeof(struct i40e_aq_desc)); 871 desc->opcode = cpu_to_le16(opcode); 872 desc->flags = cpu_to_le16(I40E_AQ_FLAG_SI); 873} 874 875/** 876 * i40evf_clean_arq_element 877 * @hw: pointer to the hw struct 878 * @e: event info from the receive descriptor, includes any buffers 879 * @pending: number of events that could be left to process 880 * 881 * This function cleans one Admin Receive Queue element and returns 882 * the contents through e. It can also return how many events are 883 * left to process through 'pending' 884 **/ 885i40e_status i40evf_clean_arq_element(struct i40e_hw *hw, 886 struct i40e_arq_event_info *e, 887 u16 *pending) 888{ 889 i40e_status ret_code = 0; 890 u16 ntc = hw->aq.arq.next_to_clean; 891 struct i40e_aq_desc *desc; 892 struct i40e_dma_mem *bi; 893 u16 desc_idx; 894 u16 datalen; 895 u16 flags; 896 u16 ntu; 897 898 /* take the lock before we start messing with the ring */ 899 mutex_lock(&hw->aq.arq_mutex); 900 901 /* set next_to_use to head */ 902 ntu = (rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK); 903 if (ntu == ntc) { 904 /* nothing to do - shouldn't need to update ring's values */ 905 ret_code = I40E_ERR_ADMIN_QUEUE_NO_WORK; 906 goto clean_arq_element_out; 907 } 908 909 /* now clean the next descriptor */ 910 desc = I40E_ADMINQ_DESC(hw->aq.arq, ntc); 911 desc_idx = ntc; 912 913 flags = le16_to_cpu(desc->flags); 914 if (flags & I40E_AQ_FLAG_ERR) { 915 ret_code = I40E_ERR_ADMIN_QUEUE_ERROR; 916 hw->aq.arq_last_status = 917 (enum i40e_admin_queue_err)le16_to_cpu(desc->retval); 918 i40e_debug(hw, 919 I40E_DEBUG_AQ_MESSAGE, 920 "AQRX: Event received with error 0x%X.\n", 921 hw->aq.arq_last_status); 922 } 923 924 e->desc = *desc; 925 datalen = le16_to_cpu(desc->datalen); 926 e->msg_len = min(datalen, e->buf_len); 927 if (e->msg_buf != NULL && (e->msg_len != 0)) 928 memcpy(e->msg_buf, hw->aq.arq.r.arq_bi[desc_idx].va, 929 e->msg_len); 930 931 i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQRX: desc and buffer:\n"); 932 i40evf_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, e->msg_buf, 933 hw->aq.arq_buf_size); 934 935 /* Restore the original datalen and buffer address in the desc, 936 * FW updates datalen to indicate the event message 937 * size 938 */ 939 bi = &hw->aq.arq.r.arq_bi[ntc]; 940 memset((void *)desc, 0, sizeof(struct i40e_aq_desc)); 941 942 desc->flags = cpu_to_le16(I40E_AQ_FLAG_BUF); 943 if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF) 944 desc->flags |= cpu_to_le16(I40E_AQ_FLAG_LB); 945 desc->datalen = cpu_to_le16((u16)bi->size); 946 desc->params.external.addr_high = cpu_to_le32(upper_32_bits(bi->pa)); 947 desc->params.external.addr_low = cpu_to_le32(lower_32_bits(bi->pa)); 948 949 /* set tail = the last cleaned desc index. */ 950 wr32(hw, hw->aq.arq.tail, ntc); 951 /* ntc is updated to tail + 1 */ 952 ntc++; 953 if (ntc == hw->aq.num_arq_entries) 954 ntc = 0; 955 hw->aq.arq.next_to_clean = ntc; 956 hw->aq.arq.next_to_use = ntu; 957 958clean_arq_element_out: 959 /* Set pending if needed, unlock and return */ 960 if (pending != NULL) 961 *pending = (ntc > ntu ? hw->aq.arq.count : 0) + (ntu - ntc); 962 mutex_unlock(&hw->aq.arq_mutex); 963 964 return ret_code; 965} 966 967void i40evf_resume_aq(struct i40e_hw *hw) 968{ 969 /* Registers are reset after PF reset */ 970 hw->aq.asq.next_to_use = 0; 971 hw->aq.asq.next_to_clean = 0; 972 973 i40e_config_asq_regs(hw); 974 975 hw->aq.arq.next_to_use = 0; 976 hw->aq.arq.next_to_clean = 0; 977 978 i40e_config_arq_regs(hw); 979} 980