root/drivers/net/ethernet/intel/i40e/i40e_nvm.c

DEFINITIONS

This source file includes the following definitions.
  1. i40e_init_nvm
  2. i40e_acquire_nvm
  3. i40e_release_nvm
  4. i40e_poll_sr_srctl_done_bit
  5. i40e_read_nvm_word_srctl
  6. i40e_read_nvm_aq
  7. i40e_read_nvm_word_aq
  8. __i40e_read_nvm_word
  9. i40e_read_nvm_word
  10. i40e_read_nvm_module_data
  11. i40e_read_nvm_buffer_srctl
  12. i40e_read_nvm_buffer_aq
  13. __i40e_read_nvm_buffer
  14. i40e_read_nvm_buffer
  15. i40e_write_nvm_aq
  16. i40e_calc_nvm_checksum
  17. i40e_update_nvm_checksum
  18. i40e_validate_nvm_checksum
  19. i40e_nvmupd_get_module
  20. i40e_nvmupd_get_transaction
  21. i40e_nvmupd_get_preservation_flags
  22. i40e_nvmupd_command
  23. i40e_nvmupd_state_init
  24. i40e_nvmupd_state_reading
  25. i40e_nvmupd_state_writing
  26. i40e_nvmupd_clear_wait_state
  27. i40e_nvmupd_check_wait_event
  28. i40e_nvmupd_validate_command
  29. i40e_nvmupd_exec_aq
  30. i40e_nvmupd_get_aq_result
  31. i40e_nvmupd_get_aq_event
  32. i40e_nvmupd_nvm_read
  33. i40e_nvmupd_nvm_erase
  34. i40e_nvmupd_nvm_write

   1 // SPDX-License-Identifier: GPL-2.0
   2 /* Copyright(c) 2013 - 2018 Intel Corporation. */
   3 
   4 #include "i40e_prototype.h"
   5 
   6 /**
    7  * i40e_init_nvm - Initialize NVM access parameters
   8  * @hw: pointer to the HW structure
   9  *
   10  * Set up the NVM info structure. Should be called once per NVM
   11  * initialization, e.g. inside i40e_init_shared_code().
   12  * Please note that the NVM term is used here (and in all functions covered
   13  * in this file) as an equivalent of the FLASH part mapped into the SR.
   14  * The FLASH is always accessed through the Shadow RAM.
   15  **/
  16 i40e_status i40e_init_nvm(struct i40e_hw *hw)
  17 {
  18         struct i40e_nvm_info *nvm = &hw->nvm;
  19         i40e_status ret_code = 0;
  20         u32 fla, gens;
  21         u8 sr_size;
  22 
  23         /* The SR size is stored regardless of the nvm programming mode
  24          * as the blank mode may be used in the factory line.
  25          */
  26         gens = rd32(hw, I40E_GLNVM_GENS);
  27         sr_size = ((gens & I40E_GLNVM_GENS_SR_SIZE_MASK) >>
  28                            I40E_GLNVM_GENS_SR_SIZE_SHIFT);
   29         /* Convert to words (the SR size is 2^sr_size KB) */
  30         nvm->sr_size = BIT(sr_size) * I40E_SR_WORDS_IN_1KB;
  31 
  32         /* Check if we are in the normal or blank NVM programming mode */
  33         fla = rd32(hw, I40E_GLNVM_FLA);
  34         if (fla & I40E_GLNVM_FLA_LOCKED_MASK) { /* Normal programming mode */
  35                 /* Max NVM timeout */
  36                 nvm->timeout = I40E_MAX_NVM_TIMEOUT;
  37                 nvm->blank_nvm_mode = false;
  38         } else { /* Blank programming mode */
  39                 nvm->blank_nvm_mode = true;
  40                 ret_code = I40E_ERR_NVM_BLANK_MODE;
  41                 i40e_debug(hw, I40E_DEBUG_NVM, "NVM init error: unsupported blank mode.\n");
  42         }
  43 
  44         return ret_code;
  45 }
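
/*
 * Editor's note (illustrative, not part of the driver): how the Shadow RAM
 * size above works out in words.  Assuming I40E_SR_WORDS_IN_1KB is 512
 * (1KB = 512 16-bit words) and the GLNVM_GENS SR_SIZE field reads 6:
 *
 *     nvm->sr_size = BIT(6) * 512 = 64 * 512 = 32768 words  (a 64KB SR)
 *
 * which matches the "whole 64kB shadow RAM" mentioned by the checksum
 * helpers later in this file.
 */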
  46 
  47 /**
  48  * i40e_acquire_nvm - Generic request for acquiring the NVM ownership
  49  * @hw: pointer to the HW structure
  50  * @access: NVM access type (read or write)
  51  *
   52  * This function will request NVM ownership for the given access type
   53  * (read or write) via the proper Admin Command.
  54  **/
  55 i40e_status i40e_acquire_nvm(struct i40e_hw *hw,
  56                                        enum i40e_aq_resource_access_type access)
  57 {
  58         i40e_status ret_code = 0;
  59         u64 gtime, timeout;
  60         u64 time_left = 0;
  61 
  62         if (hw->nvm.blank_nvm_mode)
   63                 goto i40e_acquire_nvm_exit;
  64 
  65         ret_code = i40e_aq_request_resource(hw, I40E_NVM_RESOURCE_ID, access,
  66                                             0, &time_left, NULL);
  67         /* Reading the Global Device Timer */
  68         gtime = rd32(hw, I40E_GLVFGEN_TIMER);
  69 
  70         /* Store the timeout */
  71         hw->nvm.hw_semaphore_timeout = I40E_MS_TO_GTIME(time_left) + gtime;
  72 
  73         if (ret_code)
  74                 i40e_debug(hw, I40E_DEBUG_NVM,
  75                            "NVM acquire type %d failed time_left=%llu ret=%d aq_err=%d\n",
  76                            access, time_left, ret_code, hw->aq.asq_last_status);
  77 
  78         if (ret_code && time_left) {
   79                 /* Poll until the current NVM owner times out */
  80                 timeout = I40E_MS_TO_GTIME(I40E_MAX_NVM_TIMEOUT) + gtime;
  81                 while ((gtime < timeout) && time_left) {
  82                         usleep_range(10000, 20000);
  83                         gtime = rd32(hw, I40E_GLVFGEN_TIMER);
  84                         ret_code = i40e_aq_request_resource(hw,
  85                                                         I40E_NVM_RESOURCE_ID,
  86                                                         access, 0, &time_left,
  87                                                         NULL);
  88                         if (!ret_code) {
  89                                 hw->nvm.hw_semaphore_timeout =
  90                                             I40E_MS_TO_GTIME(time_left) + gtime;
  91                                 break;
  92                         }
  93                 }
  94                 if (ret_code) {
  95                         hw->nvm.hw_semaphore_timeout = 0;
  96                         i40e_debug(hw, I40E_DEBUG_NVM,
  97                                    "NVM acquire timed out, wait %llu ms before trying again. status=%d aq_err=%d\n",
  98                                    time_left, ret_code, hw->aq.asq_last_status);
  99                 }
 100         }
 101 
  102 i40e_acquire_nvm_exit:
 103         return ret_code;
 104 }
 105 
 106 /**
 107  * i40e_release_nvm - Generic request for releasing the NVM ownership
 108  * @hw: pointer to the HW structure
 109  *
 110  * This function will release NVM resource via the proper Admin Command.
 111  **/
 112 void i40e_release_nvm(struct i40e_hw *hw)
 113 {
 114         i40e_status ret_code = I40E_SUCCESS;
 115         u32 total_delay = 0;
 116 
 117         if (hw->nvm.blank_nvm_mode)
 118                 return;
 119 
 120         ret_code = i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID, 0, NULL);
 121 
  122         /* There are some rare cases where trying to release the resource
  123          * results in an admin queue timeout, so handle them correctly.
  124          */
 125         while ((ret_code == I40E_ERR_ADMIN_QUEUE_TIMEOUT) &&
 126                (total_delay < hw->aq.asq_cmd_timeout)) {
 127                 usleep_range(1000, 2000);
 128                 ret_code = i40e_aq_release_resource(hw,
 129                                                     I40E_NVM_RESOURCE_ID,
 130                                                     0, NULL);
 131                 total_delay++;
 132         }
 133 }
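
/*
 * Editor's note (illustrative sketch, not part of the driver): the usual
 * acquire/use/release bracket around the two helpers above.  The wrapper
 * name is made up for the example; i40e_read_nvm_word() further below is
 * the real in-tree user of this pattern.
 */
#if 0	/* example only */
static i40e_status example_with_nvm_lock(struct i40e_hw *hw)
{
	i40e_status status;

	/* Take NVM ownership for reading before touching the Shadow RAM */
	status = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
	if (status)
		return status;

	/* ... Shadow RAM accesses (SRCTL or AdminQ based) go here ... */

	/* Release ownership when done */
	i40e_release_nvm(hw);

	return status;
}
#endif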
 134 
 135 /**
 136  * i40e_poll_sr_srctl_done_bit - Polls the GLNVM_SRCTL done bit
 137  * @hw: pointer to the HW structure
 138  *
 139  * Polls the SRCTL Shadow RAM register done bit.
 140  **/
 141 static i40e_status i40e_poll_sr_srctl_done_bit(struct i40e_hw *hw)
 142 {
 143         i40e_status ret_code = I40E_ERR_TIMEOUT;
 144         u32 srctl, wait_cnt;
 145 
 146         /* Poll the I40E_GLNVM_SRCTL until the done bit is set */
 147         for (wait_cnt = 0; wait_cnt < I40E_SRRD_SRCTL_ATTEMPTS; wait_cnt++) {
 148                 srctl = rd32(hw, I40E_GLNVM_SRCTL);
 149                 if (srctl & I40E_GLNVM_SRCTL_DONE_MASK) {
 150                         ret_code = 0;
 151                         break;
 152                 }
 153                 udelay(5);
 154         }
 155         if (ret_code == I40E_ERR_TIMEOUT)
 156                 i40e_debug(hw, I40E_DEBUG_NVM, "Done bit in GLNVM_SRCTL not set");
 157         return ret_code;
 158 }
 159 
 160 /**
 161  * i40e_read_nvm_word_srctl - Reads Shadow RAM via SRCTL register
 162  * @hw: pointer to the HW structure
 163  * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
 164  * @data: word read from the Shadow RAM
 165  *
 166  * Reads one 16 bit word from the Shadow RAM using the GLNVM_SRCTL register.
 167  **/
 168 static i40e_status i40e_read_nvm_word_srctl(struct i40e_hw *hw, u16 offset,
 169                                             u16 *data)
 170 {
 171         i40e_status ret_code = I40E_ERR_TIMEOUT;
 172         u32 sr_reg;
 173 
 174         if (offset >= hw->nvm.sr_size) {
 175                 i40e_debug(hw, I40E_DEBUG_NVM,
 176                            "NVM read error: offset %d beyond Shadow RAM limit %d\n",
 177                            offset, hw->nvm.sr_size);
 178                 ret_code = I40E_ERR_PARAM;
 179                 goto read_nvm_exit;
 180         }
 181 
 182         /* Poll the done bit first */
 183         ret_code = i40e_poll_sr_srctl_done_bit(hw);
 184         if (!ret_code) {
 185                 /* Write the address and start reading */
 186                 sr_reg = ((u32)offset << I40E_GLNVM_SRCTL_ADDR_SHIFT) |
 187                          BIT(I40E_GLNVM_SRCTL_START_SHIFT);
 188                 wr32(hw, I40E_GLNVM_SRCTL, sr_reg);
 189 
 190                 /* Poll I40E_GLNVM_SRCTL until the done bit is set */
 191                 ret_code = i40e_poll_sr_srctl_done_bit(hw);
 192                 if (!ret_code) {
 193                         sr_reg = rd32(hw, I40E_GLNVM_SRDATA);
 194                         *data = (u16)((sr_reg &
 195                                        I40E_GLNVM_SRDATA_RDDATA_MASK)
 196                                     >> I40E_GLNVM_SRDATA_RDDATA_SHIFT);
 197                 }
 198         }
 199         if (ret_code)
 200                 i40e_debug(hw, I40E_DEBUG_NVM,
 201                            "NVM read error: Couldn't access Shadow RAM address: 0x%x\n",
 202                            offset);
 203 
 204 read_nvm_exit:
 205         return ret_code;
 206 }
 207 
 208 /**
 209  * i40e_read_nvm_aq - Read Shadow RAM.
 210  * @hw: pointer to the HW structure.
 211  * @module_pointer: module pointer location in words from the NVM beginning
 212  * @offset: offset in words from module start
  213  * @words: number of words to read
  214  * @data: buffer for the words read from the Shadow RAM
  215  * @last_command: tells the AdminQ that this is the last command
  216  *
  217  * Reads a 16 bit words buffer from the Shadow RAM using the admin command.
 218  **/
 219 static i40e_status i40e_read_nvm_aq(struct i40e_hw *hw,
 220                                     u8 module_pointer, u32 offset,
 221                                     u16 words, void *data,
 222                                     bool last_command)
 223 {
 224         i40e_status ret_code = I40E_ERR_NVM;
 225         struct i40e_asq_cmd_details cmd_details;
 226 
 227         memset(&cmd_details, 0, sizeof(cmd_details));
 228         cmd_details.wb_desc = &hw->nvm_wb_desc;
 229 
 230         /* Here we are checking the SR limit only for the flat memory model.
 231          * We cannot do it for the module-based model, as we did not acquire
 232          * the NVM resource yet (we cannot get the module pointer value).
 233          * Firmware will check the module-based model.
 234          */
  235         if ((offset + words) > hw->nvm.sr_size)
  236                 i40e_debug(hw, I40E_DEBUG_NVM,
  237                            "NVM read error: offset %d beyond Shadow RAM limit %d\n",
  238                            (offset + words), hw->nvm.sr_size);
  239         else if (words > I40E_SR_SECTOR_SIZE_IN_WORDS)
  240                 /* We can read only up to 4KB (one sector) in one AQ command */
  241                 i40e_debug(hw, I40E_DEBUG_NVM,
  242                            "NVM read fail error: tried to read %d words, limit is %d.\n",
  243                            words, I40E_SR_SECTOR_SIZE_IN_WORDS);
  244         else if (((offset + (words - 1)) / I40E_SR_SECTOR_SIZE_IN_WORDS)
  245                  != (offset / I40E_SR_SECTOR_SIZE_IN_WORDS))
  246                 /* A single read cannot span two sectors */
  247                 i40e_debug(hw, I40E_DEBUG_NVM,
  248                            "NVM read error: cannot span two sectors in a single read offset=%d words=%d\n",
  249                            offset, words);
 250         else
 251                 ret_code = i40e_aq_read_nvm(hw, module_pointer,
 252                                             2 * offset,  /*bytes*/
 253                                             2 * words,   /*bytes*/
 254                                             data, last_command, &cmd_details);
 255 
 256         return ret_code;
 257 }
 258 
 259 /**
 260  * i40e_read_nvm_word_aq - Reads Shadow RAM via AQ
 261  * @hw: pointer to the HW structure
 262  * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
 263  * @data: word read from the Shadow RAM
 264  *
 265  * Reads one 16 bit word from the Shadow RAM using the AdminQ
 266  **/
 267 static i40e_status i40e_read_nvm_word_aq(struct i40e_hw *hw, u16 offset,
 268                                          u16 *data)
 269 {
 270         i40e_status ret_code = I40E_ERR_TIMEOUT;
 271 
 272         ret_code = i40e_read_nvm_aq(hw, 0x0, offset, 1, data, true);
 273         *data = le16_to_cpu(*(__le16 *)data);
 274 
 275         return ret_code;
 276 }
 277 
 278 /**
 279  * __i40e_read_nvm_word - Reads nvm word, assumes caller does the locking
 280  * @hw: pointer to the HW structure
 281  * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
 282  * @data: word read from the Shadow RAM
 283  *
 284  * Reads one 16 bit word from the Shadow RAM.
 285  *
 286  * Do not use this function except in cases where the nvm lock is already
 287  * taken via i40e_acquire_nvm().
 288  **/
 289 static i40e_status __i40e_read_nvm_word(struct i40e_hw *hw,
 290                                         u16 offset, u16 *data)
 291 {
 292         if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE)
 293                 return i40e_read_nvm_word_aq(hw, offset, data);
 294 
 295         return i40e_read_nvm_word_srctl(hw, offset, data);
 296 }
 297 
 298 /**
 299  * i40e_read_nvm_word - Reads nvm word and acquire lock if necessary
 300  * @hw: pointer to the HW structure
 301  * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
 302  * @data: word read from the Shadow RAM
 303  *
 304  * Reads one 16 bit word from the Shadow RAM.
 305  **/
 306 i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
 307                                u16 *data)
 308 {
 309         i40e_status ret_code = 0;
 310 
 311         if (hw->flags & I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK)
 312                 ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
 313         if (ret_code)
 314                 return ret_code;
 315 
 316         ret_code = __i40e_read_nvm_word(hw, offset, data);
 317 
 318         if (hw->flags & I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK)
 319                 i40e_release_nvm(hw);
 320 
 321         return ret_code;
 322 }
 323 
 324 /**
 325  * i40e_read_nvm_module_data - Reads NVM Buffer to specified memory location
 326  * @hw: pointer to the HW structure
 327  * @module_ptr: Pointer to module in words with respect to NVM beginning
 328  * @offset: offset in words from module start
 329  * @words_data_size: Words to read from NVM
 330  * @data_ptr: Pointer to memory location where resulting buffer will be stored
 331  **/
 332 i40e_status i40e_read_nvm_module_data(struct i40e_hw *hw,
 333                                       u8 module_ptr, u16 offset,
 334                                       u16 words_data_size,
 335                                       u16 *data_ptr)
 336 {
 337         i40e_status status;
 338         u16 ptr_value = 0;
 339         u32 flat_offset;
 340 
 341         if (module_ptr != 0) {
 342                 status = i40e_read_nvm_word(hw, module_ptr, &ptr_value);
 343                 if (status) {
 344                         i40e_debug(hw, I40E_DEBUG_ALL,
  345                                    "Reading nvm word failed. Error code: %d.\n",
 346                                    status);
 347                         return I40E_ERR_NVM;
 348                 }
 349         }
 350 #define I40E_NVM_INVALID_PTR_VAL 0x7FFF
 351 #define I40E_NVM_INVALID_VAL 0xFFFF
 352 
 353         /* Pointer not initialized */
 354         if (ptr_value == I40E_NVM_INVALID_PTR_VAL ||
 355             ptr_value == I40E_NVM_INVALID_VAL)
 356                 return I40E_ERR_BAD_PTR;
 357 
 358         /* Check whether the module is in SR mapped area or outside */
 359         if (ptr_value & I40E_PTR_TYPE) {
  360                 /* Pointer points outside of the Shadow RAM mapped area */
 361                 ptr_value &= ~I40E_PTR_TYPE;
 362 
 363                 /* PtrValue in 4kB units, need to convert to words */
 364                 ptr_value /= 2;
 365                 flat_offset = ((u32)ptr_value * 0x1000) + (u32)offset;
 366                 status = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
 367                 if (!status) {
 368                         status = i40e_aq_read_nvm(hw, 0, 2 * flat_offset,
 369                                                   2 * words_data_size,
 370                                                   data_ptr, true, NULL);
 371                         i40e_release_nvm(hw);
 372                         if (status) {
 373                                 i40e_debug(hw, I40E_DEBUG_ALL,
  374                                            "Reading nvm aq failed. Error code: %d.\n",
 375                                            status);
 376                                 return I40E_ERR_NVM;
 377                         }
 378                 } else {
 379                         return I40E_ERR_NVM;
 380                 }
 381         } else {
 382                 /* Read from the Shadow RAM */
 383                 status = i40e_read_nvm_buffer(hw, ptr_value + offset,
 384                                               &words_data_size, data_ptr);
 385                 if (status) {
 386                         i40e_debug(hw, I40E_DEBUG_ALL,
  387                                    "Reading nvm buffer failed. Error code: %d.\n",
 388                                    status);
 389                 }
 390         }
 391 
 392         return status;
 393 }
 394 
 395 /**
 396  * i40e_read_nvm_buffer_srctl - Reads Shadow RAM buffer via SRCTL register
 397  * @hw: pointer to the HW structure
 398  * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
 399  * @words: (in) number of words to read; (out) number of words actually read
 400  * @data: words read from the Shadow RAM
 401  *
  402  * Reads 16 bit words (data buffer) from the SR one word at a time, using
  403  * the GLNVM_SRCTL register via i40e_read_nvm_word_srctl(). No NVM ownership
  404  * handling is done here.
 405  **/
 406 static i40e_status i40e_read_nvm_buffer_srctl(struct i40e_hw *hw, u16 offset,
 407                                               u16 *words, u16 *data)
 408 {
 409         i40e_status ret_code = 0;
 410         u16 index, word;
 411 
 412         /* Loop thru the selected region */
 413         for (word = 0; word < *words; word++) {
 414                 index = offset + word;
 415                 ret_code = i40e_read_nvm_word_srctl(hw, index, &data[word]);
 416                 if (ret_code)
 417                         break;
 418         }
 419 
 420         /* Update the number of words read from the Shadow RAM */
 421         *words = word;
 422 
 423         return ret_code;
 424 }
 425 
 426 /**
 427  * i40e_read_nvm_buffer_aq - Reads Shadow RAM buffer via AQ
 428  * @hw: pointer to the HW structure
 429  * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
 430  * @words: (in) number of words to read; (out) number of words actually read
 431  * @data: words read from the Shadow RAM
 432  *
  433  * Reads 16 bit words (data buffer) from the SR using the i40e_read_nvm_aq()
  434  * method, splitting the read at 4KB sector boundaries. The caller must hold
  435  * NVM ownership.
 436  **/
 437 static i40e_status i40e_read_nvm_buffer_aq(struct i40e_hw *hw, u16 offset,
 438                                            u16 *words, u16 *data)
 439 {
 440         i40e_status ret_code;
 441         u16 read_size;
 442         bool last_cmd = false;
 443         u16 words_read = 0;
 444         u16 i = 0;
 445 
 446         do {
  447                 /* Calculate the number of words we should read in this step.
  448                  * The FVL AQ does not allow reading more than one page at a
  449                  * time or crossing page boundaries.
  450                  */
 451                 if (offset % I40E_SR_SECTOR_SIZE_IN_WORDS)
 452                         read_size = min(*words,
 453                                         (u16)(I40E_SR_SECTOR_SIZE_IN_WORDS -
 454                                       (offset % I40E_SR_SECTOR_SIZE_IN_WORDS)));
 455                 else
 456                         read_size = min((*words - words_read),
 457                                         I40E_SR_SECTOR_SIZE_IN_WORDS);
 458 
 459                 /* Check if this is last command, if so set proper flag */
 460                 if ((words_read + read_size) >= *words)
 461                         last_cmd = true;
 462 
 463                 ret_code = i40e_read_nvm_aq(hw, 0x0, offset, read_size,
 464                                             data + words_read, last_cmd);
 465                 if (ret_code)
 466                         goto read_nvm_buffer_aq_exit;
 467 
 468                 /* Increment counter for words already read and move offset to
 469                  * new read location
 470                  */
 471                 words_read += read_size;
 472                 offset += read_size;
 473         } while (words_read < *words);
 474 
 475         for (i = 0; i < *words; i++)
 476                 data[i] = le16_to_cpu(((__le16 *)data)[i]);
 477 
 478 read_nvm_buffer_aq_exit:
 479         *words = words_read;
 480         return ret_code;
 481 }
 482 
 483 /**
 484  * __i40e_read_nvm_buffer - Reads nvm buffer, caller must acquire lock
 485  * @hw: pointer to the HW structure
 486  * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
 487  * @words: (in) number of words to read; (out) number of words actually read
 488  * @data: words read from the Shadow RAM
 489  *
  490  * Reads 16 bit words (data buffer) from the SR using either the AdminQ or
  491  * the SRCTL register method, depending on device capabilities.
 492  **/
 493 static i40e_status __i40e_read_nvm_buffer(struct i40e_hw *hw,
 494                                           u16 offset, u16 *words,
 495                                           u16 *data)
 496 {
 497         if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE)
 498                 return i40e_read_nvm_buffer_aq(hw, offset, words, data);
 499 
 500         return i40e_read_nvm_buffer_srctl(hw, offset, words, data);
 501 }
 502 
 503 /**
 504  * i40e_read_nvm_buffer - Reads Shadow RAM buffer and acquire lock if necessary
 505  * @hw: pointer to the HW structure
 506  * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
 507  * @words: (in) number of words to read; (out) number of words actually read
 508  * @data: words read from the Shadow RAM
 509  *
  510  * Reads 16 bit words (data buffer) from the SR. When the AdminQ method is
  511  * used, the buffer read is preceded by the NVM ownership take and followed
  512  * by the release.
 513  **/
 514 i40e_status i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
 515                                  u16 *words, u16 *data)
 516 {
 517         i40e_status ret_code = 0;
 518 
 519         if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE) {
 520                 ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
 521                 if (!ret_code) {
 522                         ret_code = i40e_read_nvm_buffer_aq(hw, offset, words,
 523                                                            data);
 524                         i40e_release_nvm(hw);
 525                 }
 526         } else {
 527                 ret_code = i40e_read_nvm_buffer_srctl(hw, offset, words, data);
 528         }
 529 
 530         return ret_code;
 531 }
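
/*
 * Editor's note (illustrative sketch, not part of the driver): @words is
 * both an input (how many words to read) and an output (how many words were
 * actually read before an error, if any).  The function name and the offset
 * below are made up for the example.
 */
#if 0	/* example only */
static void example_buffer_read(struct i40e_hw *hw)
{
	u16 buf[16];
	u16 words = ARRAY_SIZE(buf);
	i40e_status status;

	status = i40e_read_nvm_buffer(hw, 0x100 /* example offset */,
				      &words, buf);
	if (status)
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVM buffer read failed after %d words, err %d\n",
			   words, status);
}
#endif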
 532 
 533 /**
 534  * i40e_write_nvm_aq - Writes Shadow RAM.
 535  * @hw: pointer to the HW structure.
 536  * @module_pointer: module pointer location in words from the NVM beginning
 537  * @offset: offset in words from module start
 538  * @words: number of words to write
 539  * @data: buffer with words to write to the Shadow RAM
 540  * @last_command: tells the AdminQ that this is the last command
 541  *
 542  * Writes a 16 bit words buffer to the Shadow RAM using the admin command.
 543  **/
 544 static i40e_status i40e_write_nvm_aq(struct i40e_hw *hw, u8 module_pointer,
 545                                      u32 offset, u16 words, void *data,
 546                                      bool last_command)
 547 {
 548         i40e_status ret_code = I40E_ERR_NVM;
 549         struct i40e_asq_cmd_details cmd_details;
 550 
 551         memset(&cmd_details, 0, sizeof(cmd_details));
 552         cmd_details.wb_desc = &hw->nvm_wb_desc;
 553 
 554         /* Here we are checking the SR limit only for the flat memory model.
 555          * We cannot do it for the module-based model, as we did not acquire
 556          * the NVM resource yet (we cannot get the module pointer value).
 557          * Firmware will check the module-based model.
 558          */
 559         if ((offset + words) > hw->nvm.sr_size)
 560                 i40e_debug(hw, I40E_DEBUG_NVM,
 561                            "NVM write error: offset %d beyond Shadow RAM limit %d\n",
 562                            (offset + words), hw->nvm.sr_size);
 563         else if (words > I40E_SR_SECTOR_SIZE_IN_WORDS)
 564                 /* We can write only up to 4KB (one sector), in one AQ write */
 565                 i40e_debug(hw, I40E_DEBUG_NVM,
 566                            "NVM write fail error: tried to write %d words, limit is %d.\n",
 567                            words, I40E_SR_SECTOR_SIZE_IN_WORDS);
 568         else if (((offset + (words - 1)) / I40E_SR_SECTOR_SIZE_IN_WORDS)
 569                  != (offset / I40E_SR_SECTOR_SIZE_IN_WORDS))
 570                 /* A single write cannot spread over two sectors */
 571                 i40e_debug(hw, I40E_DEBUG_NVM,
 572                            "NVM write error: cannot spread over two sectors in a single write offset=%d words=%d\n",
 573                            offset, words);
 574         else
 575                 ret_code = i40e_aq_update_nvm(hw, module_pointer,
 576                                               2 * offset,  /*bytes*/
 577                                               2 * words,   /*bytes*/
 578                                               data, last_command, 0,
 579                                               &cmd_details);
 580 
 581         return ret_code;
 582 }
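
/*
 * Editor's note (illustrative sketch, not part of the driver): because
 * i40e_write_nvm_aq() rejects writes that cross a 4KB sector boundary, a
 * caller writing an arbitrary buffer has to chunk it, much like
 * i40e_read_nvm_buffer_aq() chunks reads.  The helper name below is made up
 * for the example; NVM ownership and checksum handling are omitted.
 */
#if 0	/* example only */
static i40e_status example_chunked_write(struct i40e_hw *hw, u32 offset,
					 u16 words, u16 *data)
{
	i40e_status status = 0;
	u16 written = 0;

	while (written < words) {
		/* Stay within the current 4KB sector */
		u16 chunk = I40E_SR_SECTOR_SIZE_IN_WORDS -
			    (offset % I40E_SR_SECTOR_SIZE_IN_WORDS);

		chunk = min(chunk, (u16)(words - written));

		status = i40e_write_nvm_aq(hw, 0x0, offset, chunk,
					   data + written,
					   written + chunk >= words);
		if (status)
			break;

		written += chunk;
		offset += chunk;
	}

	return status;
}
#endif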
 583 
 584 /**
 585  * i40e_calc_nvm_checksum - Calculates and returns the checksum
 586  * @hw: pointer to hardware structure
 587  * @checksum: pointer to the checksum
 588  *
 589  * This function calculates SW Checksum that covers the whole 64kB shadow RAM
 590  * except the VPD and PCIe ALT Auto-load modules. The structure and size of VPD
  591  * is customer specific and unknown. Therefore, this function skips the
  592  * maximum possible size of the VPD module (1kB).
 593  **/
 594 static i40e_status i40e_calc_nvm_checksum(struct i40e_hw *hw,
 595                                                     u16 *checksum)
 596 {
 597         i40e_status ret_code;
 598         struct i40e_virt_mem vmem;
 599         u16 pcie_alt_module = 0;
 600         u16 checksum_local = 0;
 601         u16 vpd_module = 0;
 602         u16 *data;
 603         u16 i = 0;
 604 
 605         ret_code = i40e_allocate_virt_mem(hw, &vmem,
 606                                     I40E_SR_SECTOR_SIZE_IN_WORDS * sizeof(u16));
 607         if (ret_code)
 608                 goto i40e_calc_nvm_checksum_exit;
 609         data = (u16 *)vmem.va;
 610 
 611         /* read pointer to VPD area */
 612         ret_code = __i40e_read_nvm_word(hw, I40E_SR_VPD_PTR, &vpd_module);
 613         if (ret_code) {
 614                 ret_code = I40E_ERR_NVM_CHECKSUM;
 615                 goto i40e_calc_nvm_checksum_exit;
 616         }
 617 
 618         /* read pointer to PCIe Alt Auto-load module */
 619         ret_code = __i40e_read_nvm_word(hw, I40E_SR_PCIE_ALT_AUTO_LOAD_PTR,
 620                                         &pcie_alt_module);
 621         if (ret_code) {
 622                 ret_code = I40E_ERR_NVM_CHECKSUM;
 623                 goto i40e_calc_nvm_checksum_exit;
 624         }
 625 
 626         /* Calculate SW checksum that covers the whole 64kB shadow RAM
 627          * except the VPD and PCIe ALT Auto-load modules
 628          */
 629         for (i = 0; i < hw->nvm.sr_size; i++) {
 630                 /* Read SR page */
 631                 if ((i % I40E_SR_SECTOR_SIZE_IN_WORDS) == 0) {
 632                         u16 words = I40E_SR_SECTOR_SIZE_IN_WORDS;
 633 
 634                         ret_code = __i40e_read_nvm_buffer(hw, i, &words, data);
 635                         if (ret_code) {
 636                                 ret_code = I40E_ERR_NVM_CHECKSUM;
 637                                 goto i40e_calc_nvm_checksum_exit;
 638                         }
 639                 }
 640 
 641                 /* Skip Checksum word */
 642                 if (i == I40E_SR_SW_CHECKSUM_WORD)
 643                         continue;
 644                 /* Skip VPD module (convert byte size to word count) */
 645                 if ((i >= (u32)vpd_module) &&
 646                     (i < ((u32)vpd_module +
 647                      (I40E_SR_VPD_MODULE_MAX_SIZE / 2)))) {
 648                         continue;
 649                 }
 650                 /* Skip PCIe ALT module (convert byte size to word count) */
 651                 if ((i >= (u32)pcie_alt_module) &&
 652                     (i < ((u32)pcie_alt_module +
 653                      (I40E_SR_PCIE_ALT_MODULE_MAX_SIZE / 2)))) {
 654                         continue;
 655                 }
 656 
 657                 checksum_local += data[i % I40E_SR_SECTOR_SIZE_IN_WORDS];
 658         }
 659 
 660         *checksum = (u16)I40E_SR_SW_CHECKSUM_BASE - checksum_local;
 661 
 662 i40e_calc_nvm_checksum_exit:
 663         i40e_free_virt_mem(hw, &vmem);
 664         return ret_code;
 665 }
 666 
 667 /**
 668  * i40e_update_nvm_checksum - Updates the NVM checksum
 669  * @hw: pointer to hardware structure
 670  *
 671  * NVM ownership must be acquired before calling this function and released
 672  * on ARQ completion event reception by caller.
 673  * This function will commit SR to NVM.
 674  **/
 675 i40e_status i40e_update_nvm_checksum(struct i40e_hw *hw)
 676 {
 677         i40e_status ret_code;
 678         u16 checksum;
 679         __le16 le_sum;
 680 
 681         ret_code = i40e_calc_nvm_checksum(hw, &checksum);
 682         le_sum = cpu_to_le16(checksum);
 683         if (!ret_code)
 684                 ret_code = i40e_write_nvm_aq(hw, 0x00, I40E_SR_SW_CHECKSUM_WORD,
 685                                              1, &le_sum, true);
 686 
 687         return ret_code;
 688 }
 689 
 690 /**
 691  * i40e_validate_nvm_checksum - Validate EEPROM checksum
 692  * @hw: pointer to hardware structure
 693  * @checksum: calculated checksum
 694  *
 695  * Performs checksum calculation and validates the NVM SW checksum. If the
  696  * caller does not need the checksum value, the pointer can be NULL.
 697  **/
 698 i40e_status i40e_validate_nvm_checksum(struct i40e_hw *hw,
 699                                                  u16 *checksum)
 700 {
 701         i40e_status ret_code = 0;
 702         u16 checksum_sr = 0;
 703         u16 checksum_local = 0;
 704 
 705         /* We must acquire the NVM lock in order to correctly synchronize the
 706          * NVM accesses across multiple PFs. Without doing so it is possible
 707          * for one of the PFs to read invalid data potentially indicating that
 708          * the checksum is invalid.
 709          */
 710         ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
 711         if (ret_code)
 712                 return ret_code;
 713         ret_code = i40e_calc_nvm_checksum(hw, &checksum_local);
 714         __i40e_read_nvm_word(hw, I40E_SR_SW_CHECKSUM_WORD, &checksum_sr);
 715         i40e_release_nvm(hw);
 716         if (ret_code)
 717                 return ret_code;
 718 
 719         /* Verify read checksum from EEPROM is the same as
 720          * calculated checksum
 721          */
 722         if (checksum_local != checksum_sr)
 723                 ret_code = I40E_ERR_NVM_CHECKSUM;
 724 
 725         /* If the user cares, return the calculated checksum */
 726         if (checksum)
 727                 *checksum = checksum_local;
 728 
 729         return ret_code;
 730 }
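
/*
 * Editor's note (illustrative sketch, not part of the driver): the
 * @checksum argument above is optional, so a caller that only wants a
 * pass/fail answer can pass NULL.  The function name is made up for the
 * example.
 */
#if 0	/* example only */
static void example_checksum_check(struct i40e_hw *hw)
{
	u16 sw_checksum = 0;

	/* Pass/fail plus the calculated value */
	if (i40e_validate_nvm_checksum(hw, &sw_checksum))
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVM checksum mismatch, calculated 0x%04x\n",
			   sw_checksum);

	/* Pass/fail only; the value pointer may be NULL */
	if (i40e_validate_nvm_checksum(hw, NULL))
		i40e_debug(hw, I40E_DEBUG_NVM, "NVM checksum invalid\n");
}
#endif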
 731 
 732 static i40e_status i40e_nvmupd_state_init(struct i40e_hw *hw,
 733                                           struct i40e_nvm_access *cmd,
 734                                           u8 *bytes, int *perrno);
 735 static i40e_status i40e_nvmupd_state_reading(struct i40e_hw *hw,
 736                                              struct i40e_nvm_access *cmd,
 737                                              u8 *bytes, int *perrno);
 738 static i40e_status i40e_nvmupd_state_writing(struct i40e_hw *hw,
 739                                              struct i40e_nvm_access *cmd,
  740                                              u8 *bytes, int *perrno);
 741 static enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw,
 742                                                 struct i40e_nvm_access *cmd,
 743                                                 int *perrno);
 744 static i40e_status i40e_nvmupd_nvm_erase(struct i40e_hw *hw,
 745                                          struct i40e_nvm_access *cmd,
 746                                          int *perrno);
 747 static i40e_status i40e_nvmupd_nvm_write(struct i40e_hw *hw,
 748                                          struct i40e_nvm_access *cmd,
 749                                          u8 *bytes, int *perrno);
 750 static i40e_status i40e_nvmupd_nvm_read(struct i40e_hw *hw,
 751                                         struct i40e_nvm_access *cmd,
 752                                         u8 *bytes, int *perrno);
 753 static i40e_status i40e_nvmupd_exec_aq(struct i40e_hw *hw,
 754                                        struct i40e_nvm_access *cmd,
 755                                        u8 *bytes, int *perrno);
 756 static i40e_status i40e_nvmupd_get_aq_result(struct i40e_hw *hw,
 757                                              struct i40e_nvm_access *cmd,
 758                                              u8 *bytes, int *perrno);
 759 static i40e_status i40e_nvmupd_get_aq_event(struct i40e_hw *hw,
 760                                             struct i40e_nvm_access *cmd,
 761                                             u8 *bytes, int *perrno);
 762 static inline u8 i40e_nvmupd_get_module(u32 val)
 763 {
 764         return (u8)(val & I40E_NVM_MOD_PNT_MASK);
 765 }
 766 static inline u8 i40e_nvmupd_get_transaction(u32 val)
 767 {
 768         return (u8)((val & I40E_NVM_TRANS_MASK) >> I40E_NVM_TRANS_SHIFT);
 769 }
 770 
 771 static inline u8 i40e_nvmupd_get_preservation_flags(u32 val)
 772 {
 773         return (u8)((val & I40E_NVM_PRESERVATION_FLAGS_MASK) >>
 774                     I40E_NVM_PRESERVATION_FLAGS_SHIFT);
 775 }
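
/*
 * Editor's note (illustrative sketch, not part of the driver): an nvmupd
 * request packs the module pointer, the transaction type and the
 * preservation flags into the single cmd->config word; the three helpers
 * above simply extract those fields again.  The function name is made up
 * for the example.
 */
#if 0	/* example only */
static void example_decode_config(struct i40e_hw *hw,
				  struct i40e_nvm_access *cmd)
{
	u8 module = i40e_nvmupd_get_module(cmd->config);
	u8 transaction = i40e_nvmupd_get_transaction(cmd->config);
	u8 preserve = i40e_nvmupd_get_preservation_flags(cmd->config);

	i40e_debug(hw, I40E_DEBUG_NVM,
		   "config 0x%08x -> module 0x%02x trans 0x%x preserve 0x%x\n",
		   cmd->config, module, transaction, preserve);
}
#endif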
 776 
 777 static const char * const i40e_nvm_update_state_str[] = {
 778         "I40E_NVMUPD_INVALID",
 779         "I40E_NVMUPD_READ_CON",
 780         "I40E_NVMUPD_READ_SNT",
 781         "I40E_NVMUPD_READ_LCB",
 782         "I40E_NVMUPD_READ_SA",
 783         "I40E_NVMUPD_WRITE_ERA",
 784         "I40E_NVMUPD_WRITE_CON",
 785         "I40E_NVMUPD_WRITE_SNT",
 786         "I40E_NVMUPD_WRITE_LCB",
 787         "I40E_NVMUPD_WRITE_SA",
 788         "I40E_NVMUPD_CSUM_CON",
 789         "I40E_NVMUPD_CSUM_SA",
 790         "I40E_NVMUPD_CSUM_LCB",
 791         "I40E_NVMUPD_STATUS",
 792         "I40E_NVMUPD_EXEC_AQ",
 793         "I40E_NVMUPD_GET_AQ_RESULT",
 794         "I40E_NVMUPD_GET_AQ_EVENT",
 795 };
 796 
 797 /**
 798  * i40e_nvmupd_command - Process an NVM update command
 799  * @hw: pointer to hardware structure
 800  * @cmd: pointer to nvm update command
 801  * @bytes: pointer to the data buffer
 802  * @perrno: pointer to return error code
 803  *
 804  * Dispatches command depending on what update state is current
 805  **/
 806 i40e_status i40e_nvmupd_command(struct i40e_hw *hw,
 807                                 struct i40e_nvm_access *cmd,
 808                                 u8 *bytes, int *perrno)
 809 {
 810         i40e_status status;
 811         enum i40e_nvmupd_cmd upd_cmd;
 812 
 813         /* assume success */
 814         *perrno = 0;
 815 
 816         /* early check for status command and debug msgs */
 817         upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);
 818 
 819         i40e_debug(hw, I40E_DEBUG_NVM, "%s state %d nvm_release_on_hold %d opc 0x%04x cmd 0x%08x config 0x%08x offset 0x%08x data_size 0x%08x\n",
 820                    i40e_nvm_update_state_str[upd_cmd],
 821                    hw->nvmupd_state,
 822                    hw->nvm_release_on_done, hw->nvm_wait_opcode,
 823                    cmd->command, cmd->config, cmd->offset, cmd->data_size);
 824 
 825         if (upd_cmd == I40E_NVMUPD_INVALID) {
 826                 *perrno = -EFAULT;
 827                 i40e_debug(hw, I40E_DEBUG_NVM,
 828                            "i40e_nvmupd_validate_command returns %d errno %d\n",
 829                            upd_cmd, *perrno);
 830         }
 831 
 832         /* a status request returns immediately rather than
 833          * going into the state machine
 834          */
 835         if (upd_cmd == I40E_NVMUPD_STATUS) {
 836                 if (!cmd->data_size) {
 837                         *perrno = -EFAULT;
 838                         return I40E_ERR_BUF_TOO_SHORT;
 839                 }
 840 
 841                 bytes[0] = hw->nvmupd_state;
 842 
 843                 if (cmd->data_size >= 4) {
 844                         bytes[1] = 0;
 845                         *((u16 *)&bytes[2]) = hw->nvm_wait_opcode;
 846                 }
 847 
 848                 /* Clear error status on read */
 849                 if (hw->nvmupd_state == I40E_NVMUPD_STATE_ERROR)
 850                         hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
 851 
 852                 return 0;
 853         }
 854 
  855         /* Clear the error status even if it was not read, and log it */
 856         if (hw->nvmupd_state == I40E_NVMUPD_STATE_ERROR) {
 857                 i40e_debug(hw, I40E_DEBUG_NVM,
 858                            "Clearing I40E_NVMUPD_STATE_ERROR state without reading\n");
 859                 hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
 860         }
 861 
 862         /* Acquire lock to prevent race condition where adminq_task
 863          * can execute after i40e_nvmupd_nvm_read/write but before state
 864          * variables (nvm_wait_opcode, nvm_release_on_done) are updated.
 865          *
  866          * During NVMUpdate, it is observed that the lock could be held for
  867          * ~5ms for most commands. However, the lock is held for ~60ms for
  868          * the NVMUPD_CSUM_LCB command.
 869          */
 870         mutex_lock(&hw->aq.arq_mutex);
 871         switch (hw->nvmupd_state) {
 872         case I40E_NVMUPD_STATE_INIT:
 873                 status = i40e_nvmupd_state_init(hw, cmd, bytes, perrno);
 874                 break;
 875 
 876         case I40E_NVMUPD_STATE_READING:
 877                 status = i40e_nvmupd_state_reading(hw, cmd, bytes, perrno);
 878                 break;
 879 
 880         case I40E_NVMUPD_STATE_WRITING:
 881                 status = i40e_nvmupd_state_writing(hw, cmd, bytes, perrno);
 882                 break;
 883 
 884         case I40E_NVMUPD_STATE_INIT_WAIT:
 885         case I40E_NVMUPD_STATE_WRITE_WAIT:
 886                 /* if we need to stop waiting for an event, clear
 887                  * the wait info and return before doing anything else
 888                  */
 889                 if (cmd->offset == 0xffff) {
 890                         i40e_nvmupd_clear_wait_state(hw);
 891                         status = 0;
 892                         break;
 893                 }
 894 
 895                 status = I40E_ERR_NOT_READY;
 896                 *perrno = -EBUSY;
 897                 break;
 898 
 899         default:
 900                 /* invalid state, should never happen */
 901                 i40e_debug(hw, I40E_DEBUG_NVM,
 902                            "NVMUPD: no such state %d\n", hw->nvmupd_state);
 903                 status = I40E_NOT_SUPPORTED;
 904                 *perrno = -ESRCH;
 905                 break;
 906         }
 907 
 908         mutex_unlock(&hw->aq.arq_mutex);
 909         return status;
 910 }
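
/*
 * Editor's note (illustrative, not part of the driver): for an
 * I40E_NVMUPD_STATUS request the function above fills the caller's buffer
 * as follows, based on the code:
 *
 *     bytes[0]    - current hw->nvmupd_state
 *     bytes[1]    - 0                    (only when cmd->data_size >= 4)
 *     bytes[2..3] - hw->nvm_wait_opcode  (only when cmd->data_size >= 4)
 *
 * and a latched I40E_NVMUPD_STATE_ERROR is cleared back to INIT once the
 * status has been read.
 */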
 911 
 912 /**
 913  * i40e_nvmupd_state_init - Handle NVM update state Init
 914  * @hw: pointer to hardware structure
 915  * @cmd: pointer to nvm update command buffer
 916  * @bytes: pointer to the data buffer
 917  * @perrno: pointer to return error code
 918  *
 919  * Process legitimate commands of the Init state and conditionally set next
 920  * state. Reject all other commands.
 921  **/
 922 static i40e_status i40e_nvmupd_state_init(struct i40e_hw *hw,
 923                                           struct i40e_nvm_access *cmd,
 924                                           u8 *bytes, int *perrno)
 925 {
 926         i40e_status status = 0;
 927         enum i40e_nvmupd_cmd upd_cmd;
 928 
 929         upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);
 930 
 931         switch (upd_cmd) {
 932         case I40E_NVMUPD_READ_SA:
 933                 status = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
 934                 if (status) {
 935                         *perrno = i40e_aq_rc_to_posix(status,
 936                                                      hw->aq.asq_last_status);
 937                 } else {
 938                         status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno);
 939                         i40e_release_nvm(hw);
 940                 }
 941                 break;
 942 
 943         case I40E_NVMUPD_READ_SNT:
 944                 status = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
 945                 if (status) {
 946                         *perrno = i40e_aq_rc_to_posix(status,
 947                                                      hw->aq.asq_last_status);
 948                 } else {
 949                         status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno);
 950                         if (status)
 951                                 i40e_release_nvm(hw);
 952                         else
 953                                 hw->nvmupd_state = I40E_NVMUPD_STATE_READING;
 954                 }
 955                 break;
 956 
 957         case I40E_NVMUPD_WRITE_ERA:
 958                 status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
 959                 if (status) {
 960                         *perrno = i40e_aq_rc_to_posix(status,
 961                                                      hw->aq.asq_last_status);
 962                 } else {
 963                         status = i40e_nvmupd_nvm_erase(hw, cmd, perrno);
 964                         if (status) {
 965                                 i40e_release_nvm(hw);
 966                         } else {
 967                                 hw->nvm_release_on_done = true;
 968                                 hw->nvm_wait_opcode = i40e_aqc_opc_nvm_erase;
 969                                 hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
 970                         }
 971                 }
 972                 break;
 973 
 974         case I40E_NVMUPD_WRITE_SA:
 975                 status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
 976                 if (status) {
 977                         *perrno = i40e_aq_rc_to_posix(status,
 978                                                      hw->aq.asq_last_status);
 979                 } else {
 980                         status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
 981                         if (status) {
 982                                 i40e_release_nvm(hw);
 983                         } else {
 984                                 hw->nvm_release_on_done = true;
 985                                 hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
 986                                 hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
 987                         }
 988                 }
 989                 break;
 990 
 991         case I40E_NVMUPD_WRITE_SNT:
 992                 status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
 993                 if (status) {
 994                         *perrno = i40e_aq_rc_to_posix(status,
 995                                                      hw->aq.asq_last_status);
 996                 } else {
 997                         status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
 998                         if (status) {
 999                                 i40e_release_nvm(hw);
1000                         } else {
1001                                 hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
1002                                 hw->nvmupd_state = I40E_NVMUPD_STATE_WRITE_WAIT;
1003                         }
1004                 }
1005                 break;
1006 
1007         case I40E_NVMUPD_CSUM_SA:
1008                 status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
1009                 if (status) {
1010                         *perrno = i40e_aq_rc_to_posix(status,
1011                                                      hw->aq.asq_last_status);
1012                 } else {
1013                         status = i40e_update_nvm_checksum(hw);
1014                         if (status) {
1015                                 *perrno = hw->aq.asq_last_status ?
1016                                    i40e_aq_rc_to_posix(status,
1017                                                        hw->aq.asq_last_status) :
1018                                    -EIO;
1019                                 i40e_release_nvm(hw);
1020                         } else {
1021                                 hw->nvm_release_on_done = true;
1022                                 hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
1023                                 hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
1024                         }
1025                 }
1026                 break;
1027 
1028         case I40E_NVMUPD_EXEC_AQ:
1029                 status = i40e_nvmupd_exec_aq(hw, cmd, bytes, perrno);
1030                 break;
1031 
1032         case I40E_NVMUPD_GET_AQ_RESULT:
1033                 status = i40e_nvmupd_get_aq_result(hw, cmd, bytes, perrno);
1034                 break;
1035 
1036         case I40E_NVMUPD_GET_AQ_EVENT:
1037                 status = i40e_nvmupd_get_aq_event(hw, cmd, bytes, perrno);
1038                 break;
1039 
1040         default:
1041                 i40e_debug(hw, I40E_DEBUG_NVM,
1042                            "NVMUPD: bad cmd %s in init state\n",
1043                            i40e_nvm_update_state_str[upd_cmd]);
1044                 status = I40E_ERR_NVM;
1045                 *perrno = -ESRCH;
1046                 break;
1047         }
1048         return status;
1049 }
1050 
1051 /**
1052  * i40e_nvmupd_state_reading - Handle NVM update state Reading
1053  * @hw: pointer to hardware structure
1054  * @cmd: pointer to nvm update command buffer
1055  * @bytes: pointer to the data buffer
1056  * @perrno: pointer to return error code
1057  *
1058  * NVM ownership is already held.  Process legitimate commands and set any
1059  * change in state; reject all other commands.
1060  **/
1061 static i40e_status i40e_nvmupd_state_reading(struct i40e_hw *hw,
1062                                              struct i40e_nvm_access *cmd,
1063                                              u8 *bytes, int *perrno)
1064 {
1065         i40e_status status = 0;
1066         enum i40e_nvmupd_cmd upd_cmd;
1067 
1068         upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);
1069 
1070         switch (upd_cmd) {
1071         case I40E_NVMUPD_READ_SA:
1072         case I40E_NVMUPD_READ_CON:
1073                 status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno);
1074                 break;
1075 
1076         case I40E_NVMUPD_READ_LCB:
1077                 status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno);
1078                 i40e_release_nvm(hw);
1079                 hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
1080                 break;
1081 
1082         default:
1083                 i40e_debug(hw, I40E_DEBUG_NVM,
1084                            "NVMUPD: bad cmd %s in reading state.\n",
1085                            i40e_nvm_update_state_str[upd_cmd]);
1086                 status = I40E_NOT_SUPPORTED;
1087                 *perrno = -ESRCH;
1088                 break;
1089         }
1090         return status;
1091 }
1092 
1093 /**
1094  * i40e_nvmupd_state_writing - Handle NVM update state Writing
1095  * @hw: pointer to hardware structure
1096  * @cmd: pointer to nvm update command buffer
1097  * @bytes: pointer to the data buffer
1098  * @perrno: pointer to return error code
1099  *
1100  * NVM ownership is already held.  Process legitimate commands and set any
1101  * change in state; reject all other commands
1102  **/
1103 static i40e_status i40e_nvmupd_state_writing(struct i40e_hw *hw,
1104                                              struct i40e_nvm_access *cmd,
1105                                              u8 *bytes, int *perrno)
1106 {
1107         i40e_status status = 0;
1108         enum i40e_nvmupd_cmd upd_cmd;
1109         bool retry_attempt = false;
1110 
1111         upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);
1112 
1113 retry:
1114         switch (upd_cmd) {
1115         case I40E_NVMUPD_WRITE_CON:
1116                 status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
1117                 if (!status) {
1118                         hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
1119                         hw->nvmupd_state = I40E_NVMUPD_STATE_WRITE_WAIT;
1120                 }
1121                 break;
1122 
1123         case I40E_NVMUPD_WRITE_LCB:
1124                 status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
1125                 if (status) {
1126                         *perrno = hw->aq.asq_last_status ?
1127                                    i40e_aq_rc_to_posix(status,
1128                                                        hw->aq.asq_last_status) :
1129                                    -EIO;
1130                         hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
1131                 } else {
1132                         hw->nvm_release_on_done = true;
1133                         hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
1134                         hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
1135                 }
1136                 break;
1137 
1138         case I40E_NVMUPD_CSUM_CON:
1139                 /* Assumes the caller has acquired the nvm */
1140                 status = i40e_update_nvm_checksum(hw);
1141                 if (status) {
1142                         *perrno = hw->aq.asq_last_status ?
1143                                    i40e_aq_rc_to_posix(status,
1144                                                        hw->aq.asq_last_status) :
1145                                    -EIO;
1146                         hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
1147                 } else {
1148                         hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
1149                         hw->nvmupd_state = I40E_NVMUPD_STATE_WRITE_WAIT;
1150                 }
1151                 break;
1152 
1153         case I40E_NVMUPD_CSUM_LCB:
1154                 /* Assumes the caller has acquired the nvm */
1155                 status = i40e_update_nvm_checksum(hw);
1156                 if (status) {
1157                         *perrno = hw->aq.asq_last_status ?
1158                                    i40e_aq_rc_to_posix(status,
1159                                                        hw->aq.asq_last_status) :
1160                                    -EIO;
1161                         hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
1162                 } else {
1163                         hw->nvm_release_on_done = true;
1164                         hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
1165                         hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
1166                 }
1167                 break;
1168 
1169         default:
1170                 i40e_debug(hw, I40E_DEBUG_NVM,
1171                            "NVMUPD: bad cmd %s in writing state.\n",
1172                            i40e_nvm_update_state_str[upd_cmd]);
1173                 status = I40E_NOT_SUPPORTED;
1174                 *perrno = -ESRCH;
1175                 break;
1176         }
1177 
1178         /* In some circumstances, a multi-write transaction takes longer
1179          * than the default 3 minute timeout on the write semaphore.  If
1180          * the write failed with an EBUSY status, this is likely the problem,
1181          * so here we try to reacquire the semaphore then retry the write.
1182          * We only do one retry, then give up.
1183          */
1184         if (status && (hw->aq.asq_last_status == I40E_AQ_RC_EBUSY) &&
1185             !retry_attempt) {
1186                 i40e_status old_status = status;
1187                 u32 old_asq_status = hw->aq.asq_last_status;
1188                 u32 gtime;
1189 
1190                 gtime = rd32(hw, I40E_GLVFGEN_TIMER);
1191                 if (gtime >= hw->nvm.hw_semaphore_timeout) {
1192                         i40e_debug(hw, I40E_DEBUG_ALL,
1193                                    "NVMUPD: write semaphore expired (%d >= %lld), retrying\n",
1194                                    gtime, hw->nvm.hw_semaphore_timeout);
1195                         i40e_release_nvm(hw);
1196                         status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
1197                         if (status) {
1198                                 i40e_debug(hw, I40E_DEBUG_ALL,
1199                                            "NVMUPD: write semaphore reacquire failed aq_err = %d\n",
1200                                            hw->aq.asq_last_status);
1201                                 status = old_status;
1202                                 hw->aq.asq_last_status = old_asq_status;
1203                         } else {
1204                                 retry_attempt = true;
1205                                 goto retry;
1206                         }
1207                 }
1208         }
1209 
1210         return status;
1211 }
1212 
1213 /**
1214  * i40e_nvmupd_clear_wait_state - clear wait state on hw
1215  * @hw: pointer to the hardware structure
1216  **/
1217 void i40e_nvmupd_clear_wait_state(struct i40e_hw *hw)
1218 {
1219         i40e_debug(hw, I40E_DEBUG_NVM,
1220                    "NVMUPD: clearing wait on opcode 0x%04x\n",
1221                    hw->nvm_wait_opcode);
1222 
1223         if (hw->nvm_release_on_done) {
1224                 i40e_release_nvm(hw);
1225                 hw->nvm_release_on_done = false;
1226         }
1227         hw->nvm_wait_opcode = 0;
1228 
1229         if (hw->aq.arq_last_status) {
1230                 hw->nvmupd_state = I40E_NVMUPD_STATE_ERROR;
1231                 return;
1232         }
1233 
1234         switch (hw->nvmupd_state) {
1235         case I40E_NVMUPD_STATE_INIT_WAIT:
1236                 hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
1237                 break;
1238 
1239         case I40E_NVMUPD_STATE_WRITE_WAIT:
1240                 hw->nvmupd_state = I40E_NVMUPD_STATE_WRITING;
1241                 break;
1242 
1243         default:
1244                 break;
1245         }
1246 }
1247 
1248 /**
1249  * i40e_nvmupd_check_wait_event - handle NVM update operation events
1250  * @hw: pointer to the hardware structure
1251  * @opcode: opcode of the AdminQ event that just happened
1252  * @desc: AdminQ descriptor
1253  **/
1254 void i40e_nvmupd_check_wait_event(struct i40e_hw *hw, u16 opcode,
1255                                   struct i40e_aq_desc *desc)
1256 {
1257         u32 aq_desc_len = sizeof(struct i40e_aq_desc);
1258 
1259         if (opcode == hw->nvm_wait_opcode) {
1260                 memcpy(&hw->nvm_aq_event_desc, desc, aq_desc_len);
1261                 i40e_nvmupd_clear_wait_state(hw);
1262         }
1263 }
1264 
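/*
 * Illustrative sketch, not part of the driver source: how an AdminQ receive
 * path might feed completions into i40e_nvmupd_check_wait_event().
 * arq_next_event() is a hypothetical helper standing in for the driver's
 * AdminQ clean routine; only the call into the NVM update code uses the
 * signature shown above.
 */
extern bool arq_next_event(struct i40e_hw *hw, struct i40e_aq_desc *desc);

static void sketch_drain_arq_events(struct i40e_hw *hw)
{
        struct i40e_aq_desc desc;

        while (arq_next_event(hw, &desc)) {
                /* completes a pending INIT_WAIT/WRITE_WAIT when the opcode
                 * matches hw->nvm_wait_opcode; otherwise this is a no-op
                 */
                i40e_nvmupd_check_wait_event(hw, le16_to_cpu(desc.opcode),
                                             &desc);
        }
}
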
1265 /**
1266  * i40e_nvmupd_validate_command - Validate given command
1267  * @hw: pointer to hardware structure
1268  * @cmd: pointer to nvm update command buffer
1269  * @perrno: pointer to return error code
1270  *
1271  * Return one of the valid command types or I40E_NVMUPD_INVALID
1272  **/
1273 static enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw,
1274                                                  struct i40e_nvm_access *cmd,
1275                                                  int *perrno)
1276 {
1277         enum i40e_nvmupd_cmd upd_cmd;
1278         u8 module, transaction;
1279 
1280         /* anything that doesn't match a recognized case is an error */
1281         upd_cmd = I40E_NVMUPD_INVALID;
1282 
1283         transaction = i40e_nvmupd_get_transaction(cmd->config);
1284         module = i40e_nvmupd_get_module(cmd->config);
1285 
1286         /* limits on data size */
1287         if ((cmd->data_size < 1) ||
1288             (cmd->data_size > I40E_NVMUPD_MAX_DATA)) {
1289                 i40e_debug(hw, I40E_DEBUG_NVM,
1290                            "i40e_nvmupd_validate_command data_size %d\n",
1291                            cmd->data_size);
1292                 *perrno = -EFAULT;
1293                 return I40E_NVMUPD_INVALID;
1294         }
1295 
1296         switch (cmd->command) {
1297         case I40E_NVM_READ:
1298                 switch (transaction) {
1299                 case I40E_NVM_CON:
1300                         upd_cmd = I40E_NVMUPD_READ_CON;
1301                         break;
1302                 case I40E_NVM_SNT:
1303                         upd_cmd = I40E_NVMUPD_READ_SNT;
1304                         break;
1305                 case I40E_NVM_LCB:
1306                         upd_cmd = I40E_NVMUPD_READ_LCB;
1307                         break;
1308                 case I40E_NVM_SA:
1309                         upd_cmd = I40E_NVMUPD_READ_SA;
1310                         break;
1311                 case I40E_NVM_EXEC:
1312                         if (module == 0xf)
1313                                 upd_cmd = I40E_NVMUPD_STATUS;
1314                         else if (module == 0)
1315                                 upd_cmd = I40E_NVMUPD_GET_AQ_RESULT;
1316                         break;
1317                 case I40E_NVM_AQE:
1318                         upd_cmd = I40E_NVMUPD_GET_AQ_EVENT;
1319                         break;
1320                 }
1321                 break;
1322 
1323         case I40E_NVM_WRITE:
1324                 switch (transaction) {
1325                 case I40E_NVM_CON:
1326                         upd_cmd = I40E_NVMUPD_WRITE_CON;
1327                         break;
1328                 case I40E_NVM_SNT:
1329                         upd_cmd = I40E_NVMUPD_WRITE_SNT;
1330                         break;
1331                 case I40E_NVM_LCB:
1332                         upd_cmd = I40E_NVMUPD_WRITE_LCB;
1333                         break;
1334                 case I40E_NVM_SA:
1335                         upd_cmd = I40E_NVMUPD_WRITE_SA;
1336                         break;
1337                 case I40E_NVM_ERA:
1338                         upd_cmd = I40E_NVMUPD_WRITE_ERA;
1339                         break;
1340                 case I40E_NVM_CSUM:
1341                         upd_cmd = I40E_NVMUPD_CSUM_CON;
1342                         break;
1343                 case (I40E_NVM_CSUM|I40E_NVM_SA):
1344                         upd_cmd = I40E_NVMUPD_CSUM_SA;
1345                         break;
1346                 case (I40E_NVM_CSUM|I40E_NVM_LCB):
1347                         upd_cmd = I40E_NVMUPD_CSUM_LCB;
1348                         break;
1349                 case I40E_NVM_EXEC:
1350                         if (module == 0)
1351                                 upd_cmd = I40E_NVMUPD_EXEC_AQ;
1352                         break;
1353                 }
1354                 break;
1355         }
1356 
1357         return upd_cmd;
1358 }
1359 
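/*
 * Illustrative sketch, not part of the driver source: building the config
 * word that i40e_nvmupd_validate_command() decodes.  The real bit layout is
 * hidden behind i40e_nvmupd_get_module()/_get_transaction(); the positions
 * assumed below (module pointer in the low byte, transaction type in bits
 * 8-11) are for illustration only.
 */
#define SKETCH_MOD_SHIFT        0       /* assumed module-pointer position */
#define SKETCH_TRANS_SHIFT      8       /* assumed transaction position */

static u32 sketch_nvm_config(u8 module, u8 transaction)
{
        return ((u32)module << SKETCH_MOD_SHIFT) |
               ((u32)transaction << SKETCH_TRANS_SHIFT);
}

/*
 * For example, a single-shot read of module 0 would use
 * cmd->command = I40E_NVM_READ and
 * cmd->config  = sketch_nvm_config(0, I40E_NVM_SA),
 * which the switch above maps to I40E_NVMUPD_READ_SA.
 */
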
1360 /**
1361  * i40e_nvmupd_exec_aq - Run an AQ command
1362  * @hw: pointer to hardware structure
1363  * @cmd: pointer to nvm update command buffer
1364  * @bytes: pointer to the data buffer
1365  * @perrno: pointer to return error code
1366  *
1367  * cmd structure contains identifiers and data buffer
1368  **/
1369 static i40e_status i40e_nvmupd_exec_aq(struct i40e_hw *hw,
1370                                        struct i40e_nvm_access *cmd,
1371                                        u8 *bytes, int *perrno)
1372 {
1373         struct i40e_asq_cmd_details cmd_details;
1374         i40e_status status;
1375         struct i40e_aq_desc *aq_desc;
1376         u32 buff_size = 0;
1377         u8 *buff = NULL;
1378         u32 aq_desc_len;
1379         u32 aq_data_len;
1380 
1381         i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: %s\n", __func__);
1382         if (cmd->offset == 0xffff)
1383                 return 0;
1384 
1385         memset(&cmd_details, 0, sizeof(cmd_details));
1386         cmd_details.wb_desc = &hw->nvm_wb_desc;
1387 
1388         aq_desc_len = sizeof(struct i40e_aq_desc);
1389         memset(&hw->nvm_wb_desc, 0, aq_desc_len);
1390 
1391         /* get the aq descriptor */
1392         if (cmd->data_size < aq_desc_len) {
1393                 i40e_debug(hw, I40E_DEBUG_NVM,
1394                            "NVMUPD: not enough aq desc bytes for exec, size %d < %d\n",
1395                            cmd->data_size, aq_desc_len);
1396                 *perrno = -EINVAL;
1397                 return I40E_ERR_PARAM;
1398         }
1399         aq_desc = (struct i40e_aq_desc *)bytes;
1400 
1401         /* if data buffer needed, make sure it's ready */
1402         aq_data_len = cmd->data_size - aq_desc_len;
1403         buff_size = max_t(u32, aq_data_len, le16_to_cpu(aq_desc->datalen));
1404         if (buff_size) {
1405                 if (!hw->nvm_buff.va) {
1406                         status = i40e_allocate_virt_mem(hw, &hw->nvm_buff,
1407                                                         hw->aq.asq_buf_size);
1408                         if (status)
1409                                 i40e_debug(hw, I40E_DEBUG_NVM,
1410                                            "NVMUPD: i40e_allocate_virt_mem for exec buff failed, %d\n",
1411                                            status);
1412                 }
1413 
1414                 if (hw->nvm_buff.va) {
1415                         buff = hw->nvm_buff.va;
1416                         memcpy(buff, &bytes[aq_desc_len], aq_data_len);
1417                 }
1418         }
1419 
1420         if (cmd->offset)
1421                 memset(&hw->nvm_aq_event_desc, 0, aq_desc_len);
1422 
1423         /* and away we go! */
1424         status = i40e_asq_send_command(hw, aq_desc, buff,
1425                                        buff_size, &cmd_details);
1426         if (status) {
1427                 i40e_debug(hw, I40E_DEBUG_NVM,
1428                            "i40e_nvmupd_exec_aq err %s aq_err %s\n",
1429                            i40e_stat_str(hw, status),
1430                            i40e_aq_str(hw, hw->aq.asq_last_status));
1431                 *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
1432                 return status;
1433         }
1434 
1435         /* should we wait for a follow-up event? */
1436         if (cmd->offset) {
1437                 hw->nvm_wait_opcode = cmd->offset;
1438                 hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
1439         }
1440 
1441         return status;
1442 }
1443 
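/*
 * Illustrative sketch, not part of the driver source: the buffer layout
 * that i40e_nvmupd_exec_aq() expects from its caller.  The first
 * sizeof(struct i40e_aq_desc) bytes of 'bytes' hold the descriptor to send,
 * anything beyond that is the indirect data buffer, and cmd->offset carries
 * the AQ opcode to wait for afterwards (0 = no wait, 0xffff = no-op).
 * sketch_build_exec_aq() is hypothetical.
 */
static void sketch_build_exec_aq(struct i40e_nvm_access *cmd, u8 *bytes,
                                 const struct i40e_aq_desc *desc,
                                 const void *payload, u16 payload_len,
                                 u16 wait_opcode)
{
        u32 desc_len = sizeof(struct i40e_aq_desc);

        memcpy(bytes, desc, desc_len);                  /* descriptor first */
        if (payload_len)
                memcpy(&bytes[desc_len], payload, payload_len);

        cmd->data_size = desc_len + payload_len;        /* covers both parts */
        cmd->offset = wait_opcode;                      /* event to wait on */
}
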
1444 /**
1445  * i40e_nvmupd_get_aq_result - Get the results from the previous exec_aq
1446  * @hw: pointer to hardware structure
1447  * @cmd: pointer to nvm update command buffer
1448  * @bytes: pointer to the data buffer
1449  * @perrno: pointer to return error code
1450  *
1451  * cmd structure contains identifiers and data buffer
1452  **/
1453 static i40e_status i40e_nvmupd_get_aq_result(struct i40e_hw *hw,
1454                                              struct i40e_nvm_access *cmd,
1455                                              u8 *bytes, int *perrno)
1456 {
1457         u32 aq_total_len;
1458         u32 aq_desc_len;
1459         int remainder;
1460         u8 *buff;
1461 
1462         i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: %s\n", __func__);
1463 
1464         aq_desc_len = sizeof(struct i40e_aq_desc);
1465         aq_total_len = aq_desc_len + le16_to_cpu(hw->nvm_wb_desc.datalen);
1466 
1467         /* check offset range */
1468         if (cmd->offset > aq_total_len) {
1469                 i40e_debug(hw, I40E_DEBUG_NVM, "%s: offset too big %d > %d\n",
1470                            __func__, cmd->offset, aq_total_len);
1471                 *perrno = -EINVAL;
1472                 return I40E_ERR_PARAM;
1473         }
1474 
1475         /* check copy-length range */
1476         if (cmd->data_size > (aq_total_len - cmd->offset)) {
1477                 int new_len = aq_total_len - cmd->offset;
1478 
1479                 i40e_debug(hw, I40E_DEBUG_NVM, "%s: copy length %d too big, trimming to %d\n",
1480                            __func__, cmd->data_size, new_len);
1481                 cmd->data_size = new_len;
1482         }
1483 
1484         remainder = cmd->data_size;
1485         if (cmd->offset < aq_desc_len) {
1486                 u32 len = aq_desc_len - cmd->offset;
1487 
1488                 len = min(len, cmd->data_size);
1489                 i40e_debug(hw, I40E_DEBUG_NVM, "%s: aq_desc bytes %d to %d\n",
1490                            __func__, cmd->offset, cmd->offset + len);
1491 
1492                 buff = ((u8 *)&hw->nvm_wb_desc) + cmd->offset;
1493                 memcpy(bytes, buff, len);
1494 
1495                 bytes += len;
1496                 remainder -= len;
1497                 buff = hw->nvm_buff.va;
1498         } else {
1499                 buff = hw->nvm_buff.va + (cmd->offset - aq_desc_len);
1500         }
1501 
1502         if (remainder > 0) {
1503                 int start_byte = buff - (u8 *)hw->nvm_buff.va;
1504 
1505                 i40e_debug(hw, I40E_DEBUG_NVM, "%s: databuf bytes %d to %d\n",
1506                            __func__, start_byte, start_byte + remainder);
1507                 memcpy(bytes, buff, remainder);
1508         }
1509 
1510         return 0;
1511 }
1512 
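/*
 * Illustrative sketch, not part of the driver source: the two-region copy
 * done by i40e_nvmupd_get_aq_result().  Offsets below
 * sizeof(struct i40e_aq_desc) come out of the write-back descriptor
 * (hw->nvm_wb_desc); the rest comes out of the indirect buffer
 * (hw->nvm_buff).  Worked example, assuming a 32-byte descriptor and a
 * 16-byte response payload (aq_total_len = 48): offset 20, data_size 20
 * copies descriptor bytes 20..31 (12 bytes) and then buffer bytes 0..7
 * (8 bytes).
 */
static void sketch_split_aq_result(u32 offset, u32 size,
                                   u32 *from_desc, u32 *from_buff)
{
        u32 desc_len = sizeof(struct i40e_aq_desc);

        /* bytes served from the descriptor region, if the offset starts there */
        *from_desc = (offset < desc_len) ? min(desc_len - offset, size) : 0;
        /* everything left over is served from hw->nvm_buff */
        *from_buff = size - *from_desc;
}
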
1513 /**
1514  * i40e_nvmupd_get_aq_event - Get the Admin Queue event from previous exec_aq
1515  * @hw: pointer to hardware structure
1516  * @cmd: pointer to nvm update command buffer
1517  * @bytes: pointer to the data buffer
1518  * @perrno: pointer to return error code
1519  *
1520  * cmd structure contains identifiers and data buffer
1521  **/
1522 static i40e_status i40e_nvmupd_get_aq_event(struct i40e_hw *hw,
1523                                             struct i40e_nvm_access *cmd,
1524                                             u8 *bytes, int *perrno)
1525 {
1526         u32 aq_total_len;
1527         u32 aq_desc_len;
1528 
1529         i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: %s\n", __func__);
1530 
1531         aq_desc_len = sizeof(struct i40e_aq_desc);
1532         aq_total_len = aq_desc_len + le16_to_cpu(hw->nvm_aq_event_desc.datalen);
1533 
1534         /* check copy-length range */
1535         if (cmd->data_size > aq_total_len) {
1536                 i40e_debug(hw, I40E_DEBUG_NVM,
1537                            "%s: copy length %d too big, trimming to %d\n",
1538                            __func__, cmd->data_size, aq_total_len);
1539                 cmd->data_size = aq_total_len;
1540         }
1541 
1542         memcpy(bytes, &hw->nvm_aq_event_desc, cmd->data_size);
1543 
1544         return 0;
1545 }
1546 
1547 /**
1548  * i40e_nvmupd_nvm_read - Read NVM
1549  * @hw: pointer to hardware structure
1550  * @cmd: pointer to nvm update command buffer
1551  * @bytes: pointer to the data buffer
1552  * @perrno: pointer to return error code
1553  *
1554  * cmd structure contains identifiers and data buffer
1555  **/
1556 static i40e_status i40e_nvmupd_nvm_read(struct i40e_hw *hw,
1557                                         struct i40e_nvm_access *cmd,
1558                                         u8 *bytes, int *perrno)
1559 {
1560         struct i40e_asq_cmd_details cmd_details;
1561         i40e_status status;
1562         u8 module, transaction;
1563         bool last;
1564 
1565         transaction = i40e_nvmupd_get_transaction(cmd->config);
1566         module = i40e_nvmupd_get_module(cmd->config);
1567         last = (transaction == I40E_NVM_LCB) || (transaction == I40E_NVM_SA);
1568 
1569         memset(&cmd_details, 0, sizeof(cmd_details));
1570         cmd_details.wb_desc = &hw->nvm_wb_desc;
1571 
1572         status = i40e_aq_read_nvm(hw, module, cmd->offset, (u16)cmd->data_size,
1573                                   bytes, last, &cmd_details);
1574         if (status) {
1575                 i40e_debug(hw, I40E_DEBUG_NVM,
1576                            "i40e_nvmupd_nvm_read mod 0x%x  off 0x%x  len 0x%x\n",
1577                            module, cmd->offset, cmd->data_size);
1578                 i40e_debug(hw, I40E_DEBUG_NVM,
1579                            "i40e_nvmupd_nvm_read status %d aq %d\n",
1580                            status, hw->aq.asq_last_status);
1581                 *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
1582         }
1583 
1584         return status;
1585 }
1586 
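/*
 * Illustrative sketch, not part of the driver source: a single-shot (SA)
 * read request as the update tool would submit it.  The config encoding
 * reuses the assumed bit positions from the earlier sketch, and the
 * dispatch goes through i40e_nvmupd_command(), whose exact signature is
 * taken from this file's earlier declarations and should be treated as an
 * assumption here.
 */
static i40e_status sketch_read_single_shot(struct i40e_hw *hw, u32 offset,
                                           u32 len, u8 *out, int *perrno)
{
        struct i40e_nvm_access cmd = {
                .command   = I40E_NVM_READ,
                .config    = (u32)I40E_NVM_SA << 8,     /* assumed layout */
                .offset    = offset,    /* passed through to i40e_aq_read_nvm() */
                .data_size = len,       /* bytes to read back */
        };

        /* validated and routed to i40e_nvmupd_nvm_read() by the dispatcher */
        return i40e_nvmupd_command(hw, &cmd, out, perrno);
}
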
1587 /**
1588  * i40e_nvmupd_nvm_erase - Erase an NVM module
1589  * @hw: pointer to hardware structure
1590  * @cmd: pointer to nvm update command buffer
1591  * @perrno: pointer to return error code
1592  *
1593  * module, offset and data_size are taken from the cmd structure
1594  **/
1595 static i40e_status i40e_nvmupd_nvm_erase(struct i40e_hw *hw,
1596                                          struct i40e_nvm_access *cmd,
1597                                          int *perrno)
1598 {
1599         i40e_status status = 0;
1600         struct i40e_asq_cmd_details cmd_details;
1601         u8 module, transaction;
1602         bool last;
1603 
1604         transaction = i40e_nvmupd_get_transaction(cmd->config);
1605         module = i40e_nvmupd_get_module(cmd->config);
1606         last = (transaction & I40E_NVM_LCB);
1607 
1608         memset(&cmd_details, 0, sizeof(cmd_details));
1609         cmd_details.wb_desc = &hw->nvm_wb_desc;
1610 
1611         status = i40e_aq_erase_nvm(hw, module, cmd->offset, (u16)cmd->data_size,
1612                                    last, &cmd_details);
1613         if (status) {
1614                 i40e_debug(hw, I40E_DEBUG_NVM,
1615                            "i40e_nvmupd_nvm_erase mod 0x%x  off 0x%x len 0x%x\n",
1616                            module, cmd->offset, cmd->data_size);
1617                 i40e_debug(hw, I40E_DEBUG_NVM,
1618                            "i40e_nvmupd_nvm_erase status %d aq %d\n",
1619                            status, hw->aq.asq_last_status);
1620                 *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
1621         }
1622 
1623         return status;
1624 }
1625 
1626 /**
1627  * i40e_nvmupd_nvm_write - Write NVM
1628  * @hw: pointer to hardware structure
1629  * @cmd: pointer to nvm update command buffer
1630  * @bytes: pointer to the data buffer
1631  * @perrno: pointer to return error code
1632  *
1633  * module, offset, data_size and data are in cmd structure
1634  **/
1635 static i40e_status i40e_nvmupd_nvm_write(struct i40e_hw *hw,
1636                                          struct i40e_nvm_access *cmd,
1637                                          u8 *bytes, int *perrno)
1638 {
1639         i40e_status status = 0;
1640         struct i40e_asq_cmd_details cmd_details;
1641         u8 module, transaction;
1642         u8 preservation_flags;
1643         bool last;
1644 
1645         transaction = i40e_nvmupd_get_transaction(cmd->config);
1646         module = i40e_nvmupd_get_module(cmd->config);
1647         last = (transaction & I40E_NVM_LCB);
1648         preservation_flags = i40e_nvmupd_get_preservation_flags(cmd->config);
1649 
1650         memset(&cmd_details, 0, sizeof(cmd_details));
1651         cmd_details.wb_desc = &hw->nvm_wb_desc;
1652 
1653         status = i40e_aq_update_nvm(hw, module, cmd->offset,
1654                                     (u16)cmd->data_size, bytes, last,
1655                                     preservation_flags, &cmd_details);
1656         if (status) {
1657                 i40e_debug(hw, I40E_DEBUG_NVM,
1658                            "i40e_nvmupd_nvm_write mod 0x%x off 0x%x len 0x%x\n",
1659                            module, cmd->offset, cmd->data_size);
1660                 i40e_debug(hw, I40E_DEBUG_NVM,
1661                            "i40e_nvmupd_nvm_write status %d aq %d\n",
1662                            status, hw->aq.asq_last_status);
1663                 *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
1664         }
1665 
1666         return status;
1667 }
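
/*
 * Illustrative sketch, not part of the driver source: how a multi-part
 * write is sequenced through i40e_nvmupd_nvm_write().  Only the transaction
 * type changes per chunk; 'last' is derived from the LCB bit, and the
 * preservation flags ride in the same config word via
 * i40e_nvmupd_get_preservation_flags().  sketch_send_chunk() is a
 * hypothetical stand-in for filling struct i40e_nvm_access and dispatching
 * it.
 */
extern void sketch_send_chunk(u32 chunk, u8 transaction);

static void sketch_multipart_write(u32 nchunks)
{
        u32 i;

        for (i = 0; i < nchunks; i++) {
                u8 transaction;

                if (nchunks == 1)
                        transaction = I40E_NVM_SA;      /* single shot */
                else if (i == 0)
                        transaction = I40E_NVM_SNT;     /* start of transaction */
                else if (i == nchunks - 1)
                        transaction = I40E_NVM_LCB;     /* last command in block */
                else
                        transaction = I40E_NVM_CON;     /* continue */

                sketch_send_chunk(i, transaction);
        }
}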
