This source file includes following definitions.
- igb_raise_eec_clk
- igb_lower_eec_clk
- igb_shift_out_eec_bits
- igb_shift_in_eec_bits
- igb_poll_eerd_eewr_done
- igb_acquire_nvm
- igb_standby_nvm
- e1000_stop_nvm
- igb_release_nvm
- igb_ready_nvm_eeprom
- igb_read_nvm_spi
- igb_read_nvm_eerd
- igb_write_nvm_spi
- igb_read_part_string
- igb_read_mac_addr
- igb_validate_nvm_checksum
- igb_update_nvm_checksum
- igb_get_fw_version
1
2
3
4 #include <linux/if_ether.h>
5 #include <linux/delay.h>
6
7 #include "e1000_mac.h"
8 #include "e1000_nvm.h"
9
10
11
12
13
14
15
16
/**
 *  igb_raise_eec_clk - Raise EEPROM clock
 *  @hw: pointer to the HW structure
 *  @eecd: pointer to the caller's copy of the EECD register value
 *
 *  Sets the EEPROM clock bit (SK) in *eecd, writes it to the EECD
 *  register, flushes the write, then waits nvm.delay_usec microseconds
 *  so the device sees a full clock phase.
 */
static void igb_raise_eec_clk(struct e1000_hw *hw, u32 *eecd)
{
	*eecd = *eecd | E1000_EECD_SK;
	wr32(E1000_EECD, *eecd);
	wrfl();		/* force the posted write out before delaying */
	udelay(hw->nvm.delay_usec);
}
24
25
26
27
28
29
30
31
/**
 *  igb_lower_eec_clk - Lower EEPROM clock
 *  @hw: pointer to the HW structure
 *  @eecd: pointer to the caller's copy of the EECD register value
 *
 *  Clears the EEPROM clock bit (SK) in *eecd, writes it to the EECD
 *  register, flushes the write, then waits nvm.delay_usec microseconds
 *  so the device sees a full clock phase.
 */
static void igb_lower_eec_clk(struct e1000_hw *hw, u32 *eecd)
{
	*eecd = *eecd & ~E1000_EECD_SK;
	wr32(E1000_EECD, *eecd);
	wrfl();		/* force the posted write out before delaying */
	udelay(hw->nvm.delay_usec);
}
39
40
41
42
43
44
45
46
47
48
49
/**
 *  igb_shift_out_eec_bits - Shift data bits out to the EEPROM
 *  @hw: pointer to the HW structure
 *  @data: data to send to the EEPROM
 *  @count: number of bits to shift out (callers pass >= 1; a zero count
 *          would shift the mask out of range)
 *
 *  Bit-bangs the low @count bits of @data out on the EECD data-in (DI)
 *  line, most-significant bit first, pulsing the clock once per bit.
 */
static void igb_shift_out_eec_bits(struct e1000_hw *hw, u16 data, u16 count)
{
	struct e1000_nvm_info *nvm = &hw->nvm;
	u32 eecd = rd32(E1000_EECD);
	u32 mask;

	/* start at the most-significant of the count bits */
	mask = 1u << (count - 1);
	if (nvm->type == e1000_nvm_eeprom_spi)
		eecd |= E1000_EECD_DO;

	do {
		/* present the next data bit on the DI line */
		eecd &= ~E1000_EECD_DI;

		if (data & mask)
			eecd |= E1000_EECD_DI;

		wr32(E1000_EECD, eecd);
		wrfl();

		udelay(nvm->delay_usec);

		/* one full clock pulse latches the bit into the device */
		igb_raise_eec_clk(hw, &eecd);
		igb_lower_eec_clk(hw, &eecd);

		mask >>= 1;
	} while (mask);

	/* leave the DI line low when finished */
	eecd &= ~E1000_EECD_DI;
	wr32(E1000_EECD, eecd);
}
80
81
82
83
84
85
86
87
88
89
90
91
/**
 *  igb_shift_in_eec_bits - Shift data bits in from the EEPROM
 *  @hw: pointer to the HW structure
 *  @count: number of bits to shift in (callers pass <= 16)
 *
 *  Bit-bangs @count bits in from the EECD data-out (DO) line,
 *  most-significant bit first: raise the clock, sample DO, lower the
 *  clock, repeat.  Returns the assembled value.
 */
static u16 igb_shift_in_eec_bits(struct e1000_hw *hw, u16 count)
{
	u32 eecd;
	u32 i;
	u16 data;

	eecd = rd32(E1000_EECD);

	/* don't drive either data line while we sample */
	eecd &= ~(E1000_EECD_DO | E1000_EECD_DI);
	data = 0;

	for (i = 0; i < count; i++) {
		data <<= 1;
		igb_raise_eec_clk(hw, &eecd);

		/* re-read EECD to sample the DO line after the rising edge */
		eecd = rd32(E1000_EECD);

		eecd &= ~E1000_EECD_DI;
		if (eecd & E1000_EECD_DO)
			data |= 1;

		igb_lower_eec_clk(hw, &eecd);
	}

	return data;
}
118
119
120
121
122
123
124
125
126
127 static s32 igb_poll_eerd_eewr_done(struct e1000_hw *hw, int ee_reg)
128 {
129 u32 attempts = 100000;
130 u32 i, reg = 0;
131 s32 ret_val = -E1000_ERR_NVM;
132
133 for (i = 0; i < attempts; i++) {
134 if (ee_reg == E1000_NVM_POLL_READ)
135 reg = rd32(E1000_EERD);
136 else
137 reg = rd32(E1000_EEWR);
138
139 if (reg & E1000_NVM_RW_REG_DONE) {
140 ret_val = 0;
141 break;
142 }
143
144 udelay(5);
145 }
146
147 return ret_val;
148 }
149
150
151
152
153
154
155
156
157
158 s32 igb_acquire_nvm(struct e1000_hw *hw)
159 {
160 u32 eecd = rd32(E1000_EECD);
161 s32 timeout = E1000_NVM_GRANT_ATTEMPTS;
162 s32 ret_val = 0;
163
164
165 wr32(E1000_EECD, eecd | E1000_EECD_REQ);
166 eecd = rd32(E1000_EECD);
167
168 while (timeout) {
169 if (eecd & E1000_EECD_GNT)
170 break;
171 udelay(5);
172 eecd = rd32(E1000_EECD);
173 timeout--;
174 }
175
176 if (!timeout) {
177 eecd &= ~E1000_EECD_REQ;
178 wr32(E1000_EECD, eecd);
179 hw_dbg("Could not acquire NVM grant\n");
180 ret_val = -E1000_ERR_NVM;
181 }
182
183 return ret_val;
184 }
185
186
187
188
189
190
191
/**
 *  igb_standby_nvm - Return EEPROM to standby state
 *  @hw: pointer to the HW structure
 *
 *  For SPI parts, pulses the chip-select (CS) line high and then low
 *  again, waiting nvm.delay_usec after each edge, which terminates the
 *  current command.  Non-SPI NVM types are left untouched.
 */
static void igb_standby_nvm(struct e1000_hw *hw)
{
	struct e1000_nvm_info *nvm = &hw->nvm;
	u32 eecd = rd32(E1000_EECD);

	if (nvm->type == e1000_nvm_eeprom_spi) {
		/* Toggle CS to flush commands */
		eecd |= E1000_EECD_CS;
		wr32(E1000_EECD, eecd);
		wrfl();
		udelay(nvm->delay_usec);
		eecd &= ~E1000_EECD_CS;
		wr32(E1000_EECD, eecd);
		wrfl();
		udelay(nvm->delay_usec);
	}
}
209
210
211
212
213
214
215
/**
 *  e1000_stop_nvm - Terminate EEPROM command
 *  @hw: pointer to the HW structure
 *
 *  Terminates the current command by deselecting the chip: for SPI
 *  parts, pulls CS high and drives the clock (SK) low via
 *  igb_lower_eec_clk(), which also writes the CS change out.
 */
static void e1000_stop_nvm(struct e1000_hw *hw)
{
	u32 eecd;

	eecd = rd32(E1000_EECD);
	if (hw->nvm.type == e1000_nvm_eeprom_spi) {
		/* Pull CS high */
		eecd |= E1000_EECD_CS;
		igb_lower_eec_clk(hw, &eecd);
	}
}
227
228
229
230
231
232
233
234 void igb_release_nvm(struct e1000_hw *hw)
235 {
236 u32 eecd;
237
238 e1000_stop_nvm(hw);
239
240 eecd = rd32(E1000_EECD);
241 eecd &= ~E1000_EECD_REQ;
242 wr32(E1000_EECD, eecd);
243 }
244
245
246
247
248
249
250
/**
 *  igb_ready_nvm_eeprom - Prepare EEPROM for read/write
 *  @hw: pointer to the HW structure
 *
 *  Sets up the EEPROM for reading and writing.  For SPI parts, the clock
 *  (SK) and chip-select (CS) lines are driven low, then the device's
 *  status register is polled via the RDSR opcode until the ready/busy
 *  status bit (NVM_STATUS_RDY_SPI) clears, up to NVM_MAX_RETRY_SPI
 *  attempts.  Returns 0 when ready, -E1000_ERR_NVM on timeout.
 */
static s32 igb_ready_nvm_eeprom(struct e1000_hw *hw)
{
	struct e1000_nvm_info *nvm = &hw->nvm;
	u32 eecd = rd32(E1000_EECD);
	s32 ret_val = 0;
	u16 timeout = 0;
	u8 spi_stat_reg;

	if (nvm->type == e1000_nvm_eeprom_spi) {
		/* Clear SK and CS */
		eecd &= ~(E1000_EECD_CS | E1000_EECD_SK);
		wr32(E1000_EECD, eecd);
		wrfl();
		udelay(1);
		timeout = NVM_MAX_RETRY_SPI;

		/* Read "Status Register" repeatedly until the status bit
		 * clears.  The EEPROM signals command completion through
		 * this bit; if it does not clear within 'timeout' attempts,
		 * the EEPROM is likely bad.
		 */
		while (timeout) {
			igb_shift_out_eec_bits(hw, NVM_RDSR_OPCODE_SPI,
					       hw->nvm.opcode_bits);
			spi_stat_reg = (u8)igb_shift_in_eec_bits(hw, 8);
			if (!(spi_stat_reg & NVM_STATUS_RDY_SPI))
				break;

			udelay(5);
			igb_standby_nvm(hw);
			timeout--;
		}

		if (!timeout) {
			hw_dbg("SPI NVM Status error\n");
			ret_val = -E1000_ERR_NVM;
			goto out;
		}
	}

out:
	return ret_val;
}
295
296
297
298
299
300
301
302
303
304
/**
 *  igb_read_nvm_spi - Read EEPROM's using SPI
 *  @hw: pointer to the HW structure
 *  @offset: offset (in words) within the EEPROM to start reading
 *  @words: number of words to read
 *  @data: buffer receiving the word(s) read from the EEPROM
 *
 *  Reads 16-bit word(s) from the EEPROM over the SPI bit-bang interface:
 *  validates the range, acquires and readies the NVM, sends the READ
 *  opcode plus the byte address (word offset * 2), then shifts each word
 *  in and byte-swaps it into host order.  Returns 0 on success or a
 *  negative E1000 error code.
 */
s32 igb_read_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
{
	struct e1000_nvm_info *nvm = &hw->nvm;
	u32 i = 0;
	s32 ret_val;
	u16 word_in;
	u8 read_opcode = NVM_READ_OPCODE_SPI;

	/* A check for invalid values:  offset too large, too many words,
	 * and not enough words.
	 */
	if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
	    (words == 0)) {
		hw_dbg("nvm parameter(s) out of bounds\n");
		ret_val = -E1000_ERR_NVM;
		goto out;
	}

	ret_val = nvm->ops.acquire(hw);
	if (ret_val)
		goto out;

	ret_val = igb_ready_nvm_eeprom(hw);
	if (ret_val)
		goto release;

	igb_standby_nvm(hw);

	/* small parts with 8 address bits carry the 9th address bit (A8)
	 * inside the opcode
	 */
	if ((nvm->address_bits == 8) && (offset >= 128))
		read_opcode |= NVM_A8_OPCODE_SPI;

	/* Send the READ command (opcode + byte address) */
	igb_shift_out_eec_bits(hw, read_opcode, nvm->opcode_bits);
	igb_shift_out_eec_bits(hw, (u16)(offset*2), nvm->address_bits);

	/* Read the data.  The address auto-increments on the device side,
	 * so all requested words are clocked in back-to-back; each word
	 * arrives high-byte first and is swapped into host order.
	 */
	for (i = 0; i < words; i++) {
		word_in = igb_shift_in_eec_bits(hw, 16);
		data[i] = (word_in >> 8) | (word_in << 8);
	}

release:
	nvm->ops.release(hw);

out:
	return ret_val;
}
355
356
357
358
359
360
361
362
363
364
365 s32 igb_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
366 {
367 struct e1000_nvm_info *nvm = &hw->nvm;
368 u32 i, eerd = 0;
369 s32 ret_val = 0;
370
371
372
373
374 if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
375 (words == 0)) {
376 hw_dbg("nvm parameter(s) out of bounds\n");
377 ret_val = -E1000_ERR_NVM;
378 goto out;
379 }
380
381 for (i = 0; i < words; i++) {
382 eerd = ((offset+i) << E1000_NVM_RW_ADDR_SHIFT) +
383 E1000_NVM_RW_REG_START;
384
385 wr32(E1000_EERD, eerd);
386 ret_val = igb_poll_eerd_eewr_done(hw, E1000_NVM_POLL_READ);
387 if (ret_val)
388 break;
389
390 data[i] = (rd32(E1000_EERD) >>
391 E1000_NVM_RW_REG_DATA);
392 }
393
394 out:
395 return ret_val;
396 }
397
398
399
400
401
402
403
404
405
406
407
408
409
/**
 *  igb_write_nvm_spi - Write to EEPROM using SPI
 *  @hw: pointer to the HW structure
 *  @offset: offset (in words) within the EEPROM to start writing
 *  @words: number of words to write
 *  @data: 16-bit word(s) to write to the EEPROM
 *
 *  Writes @words words to the EEPROM over the SPI bit-bang interface.
 *  The NVM is acquired and released around each burst; a write-enable
 *  (WREN) precedes every WRITE command, and a burst stops early at a
 *  page boundary so the device can commit the page.  NOTE(review):
 *  callers are expected to update the NVM checksum afterwards, or it
 *  will no longer match the contents.
 */
s32 igb_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
{
	struct e1000_nvm_info *nvm = &hw->nvm;
	s32 ret_val = -E1000_ERR_NVM;
	u16 widx = 0;

	/* A check for invalid values:  offset too large, too many words,
	 * and not enough words.
	 */
	if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
	    (words == 0)) {
		hw_dbg("nvm parameter(s) out of bounds\n");
		return ret_val;
	}

	while (widx < words) {
		u8 write_opcode = NVM_WRITE_OPCODE_SPI;

		ret_val = nvm->ops.acquire(hw);
		if (ret_val)
			return ret_val;

		ret_val = igb_ready_nvm_eeprom(hw);
		if (ret_val) {
			nvm->ops.release(hw);
			return ret_val;
		}

		igb_standby_nvm(hw);

		/* Send the WRITE ENABLE command (8 bit opcode) */
		igb_shift_out_eec_bits(hw, NVM_WREN_OPCODE_SPI,
				       nvm->opcode_bits);

		igb_standby_nvm(hw);

		/* small parts with 8 address bits carry the 9th address
		 * bit (A8) inside the opcode
		 */
		if ((nvm->address_bits == 8) && (offset >= 128))
			write_opcode |= NVM_A8_OPCODE_SPI;

		/* Send the Write command (opcode + byte address) */
		igb_shift_out_eec_bits(hw, write_opcode, nvm->opcode_bits);
		igb_shift_out_eec_bits(hw, (u16)((offset + widx) * 2),
				       nvm->address_bits);

		/* Loop to allow for up to a whole page write of EEPROM */
		while (widx < words) {
			u16 word_out = data[widx];

			/* swap to the device's high-byte-first order */
			word_out = (word_out >> 8) | (word_out << 8);
			igb_shift_out_eec_bits(hw, word_out, 16);
			widx++;

			/* stop the burst at a page boundary so the part
			 * can latch the page
			 */
			if ((((offset + widx) * 2) % nvm->page_size) == 0) {
				igb_standby_nvm(hw);
				break;
			}
		}
		/* give the device time to complete its write cycle */
		usleep_range(1000, 2000);
		nvm->ops.release(hw);
	}

	return ret_val;
}
476
477
478
479
480
481
482
483
484
485
/**
 *  igb_read_part_string - Read device part number string
 *  @hw: pointer to the HW structure
 *  @part_num: pointer to the device part number buffer
 *  @part_num_size: size of the part number buffer
 *
 *  Reads the product board assembly (PBA) number from the EEPROM into
 *  @part_num as a NUL-terminated string.  Two layouts are handled:
 *  legacy images pack the PBA into two hex words which are rendered as
 *  ASCII hex digits with a dash inserted (11 bytes needed); newer images
 *  store a guard word plus a pointer to a length-prefixed string section
 *  which is copied out verbatim.
 */
s32 igb_read_part_string(struct e1000_hw *hw, u8 *part_num, u32 part_num_size)
{
	s32 ret_val;
	u16 nvm_data;
	u16 pointer;
	u16 offset;
	u16 length;

	if (part_num == NULL) {
		hw_dbg("PBA string buffer was null\n");
		ret_val = E1000_ERR_INVALID_ARGUMENT;
		goto out;
	}

	ret_val = hw->nvm.ops.read(hw, NVM_PBA_OFFSET_0, 1, &nvm_data);
	if (ret_val) {
		hw_dbg("NVM Read Error\n");
		goto out;
	}

	ret_val = hw->nvm.ops.read(hw, NVM_PBA_OFFSET_1, 1, &pointer);
	if (ret_val) {
		hw_dbg("NVM Read Error\n");
		goto out;
	}

	/* if nvm_data is not the pointer guard, the PBA is in the legacy
	 * format and 'pointer' is actually the second data word of the PBA
	 * number, so decode both words into an ASCII string
	 */
	if (nvm_data != NVM_PBA_PTR_GUARD) {
		hw_dbg("NVM PBA number is not stored as string\n");

		/* we need 11 characters to store the decoded PBA */
		if (part_num_size < 11) {
			hw_dbg("PBA string buffer too small\n");
			return E1000_ERR_NO_SPACE;
		}

		/* extract one nibble per output character */
		part_num[0] = (nvm_data >> 12) & 0xF;
		part_num[1] = (nvm_data >> 8) & 0xF;
		part_num[2] = (nvm_data >> 4) & 0xF;
		part_num[3] = nvm_data & 0xF;
		part_num[4] = (pointer >> 12) & 0xF;
		part_num[5] = (pointer >> 8) & 0xF;
		part_num[6] = '-';
		part_num[7] = 0;
		part_num[8] = (pointer >> 4) & 0xF;
		part_num[9] = pointer & 0xF;

		/* put a null character on the end of our string */
		part_num[10] = '\0';

		/* convert the raw nibbles (and the 0 after the dash) to
		 * ASCII hex digits; the '-' at [6] is >= 0x10 so it is
		 * left untouched
		 */
		for (offset = 0; offset < 10; offset++) {
			if (part_num[offset] < 0xA)
				part_num[offset] += '0';
			else if (part_num[offset] < 0x10)
				part_num[offset] += 'A' - 0xA;
		}

		goto out;
	}

	/* string format: first word of the section is its length in words */
	ret_val = hw->nvm.ops.read(hw, pointer, 1, &length);
	if (ret_val) {
		hw_dbg("NVM Read Error\n");
		goto out;
	}

	if (length == 0xFFFF || length == 0) {
		hw_dbg("NVM PBA number section invalid length\n");
		ret_val = E1000_ERR_NVM_PBA_SECTION;
		goto out;
	}

	/* check whether part_num buffer is big enough */
	if (part_num_size < (((u32)length * 2) - 1)) {
		hw_dbg("PBA string buffer too small\n");
		ret_val = E1000_ERR_NO_SPACE;
		goto out;
	}

	/* skip the length word at the start of the section */
	pointer++;
	length--;

	/* copy the string out two characters per word, high byte first */
	for (offset = 0; offset < length; offset++) {
		ret_val = hw->nvm.ops.read(hw, pointer + offset, 1, &nvm_data);
		if (ret_val) {
			hw_dbg("NVM Read Error\n");
			goto out;
		}
		part_num[offset * 2] = (u8)(nvm_data >> 8);
		part_num[(offset * 2) + 1] = (u8)(nvm_data & 0xFF);
	}
	part_num[offset * 2] = '\0';

out:
	return ret_val;
}
587
588
589
590
591
592
593
594
595
596 s32 igb_read_mac_addr(struct e1000_hw *hw)
597 {
598 u32 rar_high;
599 u32 rar_low;
600 u16 i;
601
602 rar_high = rd32(E1000_RAH(0));
603 rar_low = rd32(E1000_RAL(0));
604
605 for (i = 0; i < E1000_RAL_MAC_ADDR_LEN; i++)
606 hw->mac.perm_addr[i] = (u8)(rar_low >> (i*8));
607
608 for (i = 0; i < E1000_RAH_MAC_ADDR_LEN; i++)
609 hw->mac.perm_addr[i+4] = (u8)(rar_high >> (i*8));
610
611 for (i = 0; i < ETH_ALEN; i++)
612 hw->mac.addr[i] = hw->mac.perm_addr[i];
613
614 return 0;
615 }
616
617
618
619
620
621
622
623
624 s32 igb_validate_nvm_checksum(struct e1000_hw *hw)
625 {
626 s32 ret_val = 0;
627 u16 checksum = 0;
628 u16 i, nvm_data;
629
630 for (i = 0; i < (NVM_CHECKSUM_REG + 1); i++) {
631 ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data);
632 if (ret_val) {
633 hw_dbg("NVM Read Error\n");
634 goto out;
635 }
636 checksum += nvm_data;
637 }
638
639 if (checksum != (u16) NVM_SUM) {
640 hw_dbg("NVM Checksum Invalid\n");
641 ret_val = -E1000_ERR_NVM;
642 goto out;
643 }
644
645 out:
646 return ret_val;
647 }
648
649
650
651
652
653
654
655
656
657 s32 igb_update_nvm_checksum(struct e1000_hw *hw)
658 {
659 s32 ret_val;
660 u16 checksum = 0;
661 u16 i, nvm_data;
662
663 for (i = 0; i < NVM_CHECKSUM_REG; i++) {
664 ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data);
665 if (ret_val) {
666 hw_dbg("NVM Read Error while updating checksum.\n");
667 goto out;
668 }
669 checksum += nvm_data;
670 }
671 checksum = (u16) NVM_SUM - checksum;
672 ret_val = hw->nvm.ops.write(hw, NVM_CHECKSUM_REG, 1, &checksum);
673 if (ret_val)
674 hw_dbg("NVM Write Error while updating checksum.\n");
675
676 out:
677 return ret_val;
678 }
679
680
681
682
683
684
685
686
/**
 *  igb_get_fw_version - Get firmware version information
 *  @hw: pointer to the HW structure
 *  @fw_vers: pointer to the output version structure
 *
 *  Fills @fw_vers with the EEPROM/option-ROM/eTrack version data for the
 *  current MAC type.  The structure is zeroed first, so features that a
 *  part does not support are reported as 0.  NVM read errors from
 *  hw->nvm.ops.read are not checked here; the zeroed defaults stand in
 *  that case.
 */
void igb_get_fw_version(struct e1000_hw *hw, struct e1000_fw_version *fw_vers)
{
	u16 eeprom_verh, eeprom_verl, etrack_test, fw_version;
	u8 q, hval, rem, result;
	u16 comb_verh, comb_verl, comb_offset;

	memset(fw_vers, 0, sizeof(struct e1000_fw_version));

	/* The format of the version words varies by part and by the tool
	 * used to create the NVM image; probe the eTrack high word first
	 * to decide which layout applies.
	 */
	hw->nvm.ops.read(hw, NVM_ETRACK_HIWORD, 1, &etrack_test);
	switch (hw->mac.type) {
	case e1000_i211:
		/* i211 version data comes from the iNVM instead */
		igb_read_invm_version(hw, fw_vers);
		return;
	case e1000_82575:
	case e1000_82576:
	case e1000_82580:
		/* Use this format, unless a valid eTrack ID exists,
		 * then use the alternate format below
		 */
		if ((etrack_test & NVM_MAJOR_MASK) != NVM_ETRACK_VALID) {
			hw->nvm.ops.read(hw, NVM_VERSION, 1, &fw_version);
			fw_vers->eep_major = (fw_version & NVM_MAJOR_MASK)
				>> NVM_MAJOR_SHIFT;
			fw_vers->eep_minor = (fw_version & NVM_MINOR_MASK)
				>> NVM_MINOR_SHIFT;
			fw_vers->eep_build = (fw_version & NVM_IMAGE_ID_MASK);
			goto etrack_id;
		}
		break;
	case e1000_i210:
		/* flashless i210 also uses the iNVM version data */
		if (!(igb_get_flash_presence_i210(hw))) {
			igb_read_invm_version(hw, fw_vers);
			return;
		}
		/* fall through - flash-backed i210 is handled like i350 */
	case e1000_i350:
		/* find combo (option ROM) image version */
		hw->nvm.ops.read(hw, NVM_COMB_VER_PTR, 1, &comb_offset);
		if ((comb_offset != 0x0) &&
		    (comb_offset != NVM_VER_INVALID)) {

			hw->nvm.ops.read(hw, (NVM_COMB_VER_OFF + comb_offset
					 + 1), 1, &comb_verh);
			hw->nvm.ops.read(hw, (NVM_COMB_VER_OFF + comb_offset),
					 1, &comb_verl);

			/* only report the option ROM version when both
			 * halves are present and valid
			 */
			if ((comb_verh && comb_verl) &&
			    ((comb_verh != NVM_VER_INVALID) &&
			     (comb_verl != NVM_VER_INVALID))) {

				fw_vers->or_valid = true;
				fw_vers->or_major =
					comb_verl >> NVM_COMB_VER_SHFT;
				fw_vers->or_build =
					(comb_verl << NVM_COMB_VER_SHFT)
					| (comb_verh >> NVM_COMB_VER_SHFT);
				fw_vers->or_patch =
					comb_verh & NVM_COMB_VER_MASK;
			}
		}
		break;
	default:
		return;
	}
	hw->nvm.ops.read(hw, NVM_VERSION, 1, &fw_version);
	fw_vers->eep_major = (fw_version & NVM_MAJOR_MASK)
		>> NVM_MAJOR_SHIFT;

	/* check for the old-style minor format in newer images */
	if ((fw_version & NVM_NEW_DEC_MASK) == 0x0) {
		eeprom_verl = (fw_version & NVM_COMB_VER_MASK);
	} else {
		eeprom_verl = (fw_version & NVM_MINOR_MASK)
			>> NVM_MINOR_SHIFT;
	}

	/* Render the decimal minor value as hex digits (e.g. 25 -> 0x25).
	 * NOTE(review): presumably the tool-generated value never exceeds
	 * 99, so q and rem each stay a single digit — confirm against the
	 * NVM image tooling.
	 */
	q = eeprom_verl / NVM_HEX_CONV;
	hval = q * NVM_HEX_TENS;
	rem = eeprom_verl % NVM_HEX_CONV;
	result = hval + rem;
	fw_vers->eep_minor = result;

etrack_id:
	/* assemble the 32-bit eTrack ID from its two NVM words */
	if ((etrack_test & NVM_MAJOR_MASK) == NVM_ETRACK_VALID) {
		hw->nvm.ops.read(hw, NVM_ETRACK_WORD, 1, &eeprom_verl);
		hw->nvm.ops.read(hw, (NVM_ETRACK_WORD + 1), 1, &eeprom_verh);
		fw_vers->etrack_id = (eeprom_verh << NVM_ETRACK_SHIFT)
			| eeprom_verl;
	}
}