This source file includes following definitions.
- e1000_raise_eec_clk
- e1000_lower_eec_clk
- e1000_shift_out_eec_bits
- e1000_shift_in_eec_bits
- e1000e_poll_eerd_eewr_done
- e1000e_acquire_nvm
- e1000_standby_nvm
- e1000_stop_nvm
- e1000e_release_nvm
- e1000_ready_nvm_eeprom
- e1000e_read_nvm_eerd
- e1000e_write_nvm_spi
- e1000_read_pba_string_generic
- e1000_read_mac_addr_generic
- e1000e_validate_nvm_checksum_generic
- e1000e_update_nvm_checksum_generic
- e1000e_reload_nvm_generic
1
2
3
4 #include "e1000.h"
5
6
7
8
9
10
11
12
/**
 *  e1000_raise_eec_clk - Raise EEPROM clock
 *  @hw: pointer to the HW structure
 *  @eecd: pointer to the software image of the EECD register
 *
 *  Sets the SK (clock) bit in the caller's EECD copy, writes it out, and
 *  waits the NVM-specific delay so the EEPROM sees a clean rising edge.
 **/
static void e1000_raise_eec_clk(struct e1000_hw *hw, u32 *eecd)
{
	*eecd = *eecd | E1000_EECD_SK;
	ew32(EECD, *eecd);
	e1e_flush();
	udelay(hw->nvm.delay_usec);
}
20
21
22
23
24
25
26
27
/**
 *  e1000_lower_eec_clk - Lower EEPROM clock
 *  @hw: pointer to the HW structure
 *  @eecd: pointer to the software image of the EECD register
 *
 *  Clears the SK (clock) bit in the caller's EECD copy, writes it out, and
 *  waits the NVM-specific delay so the EEPROM sees a clean falling edge.
 **/
static void e1000_lower_eec_clk(struct e1000_hw *hw, u32 *eecd)
{
	*eecd = *eecd & ~E1000_EECD_SK;
	ew32(EECD, *eecd);
	e1e_flush();
	udelay(hw->nvm.delay_usec);
}
35
36
37
38
39
40
41
42
43
44
45
/**
 *  e1000_shift_out_eec_bits - Shift data bits out to the EEPROM
 *  @hw: pointer to the HW structure
 *  @data: data to send to the EEPROM
 *  @count: number of bits to shift out
 *
 *  Bit-bangs the lowest @count bits of @data to the EEPROM, most
 *  significant bit first, driving the DI line and pulsing the clock once
 *  per bit.
 **/
static void e1000_shift_out_eec_bits(struct e1000_hw *hw, u16 data, u16 count)
{
	struct e1000_nvm_info *nvm = &hw->nvm;
	u32 eecd = er32(EECD);
	u32 mask;

	/* start at the MSB of the count-bit value */
	mask = BIT(count - 1);
	if (nvm->type == e1000_nvm_eeprom_spi)
		eecd |= E1000_EECD_DO;

	do {
		/* a "1" is shifted out by setting DI, a "0" by leaving it
		 * clear, then raising and lowering the clock
		 */
		eecd &= ~E1000_EECD_DI;

		if (data & mask)
			eecd |= E1000_EECD_DI;

		ew32(EECD, eecd);
		e1e_flush();

		udelay(nvm->delay_usec);

		e1000_raise_eec_clk(hw, &eecd);
		e1000_lower_eec_clk(hw, &eecd);

		mask >>= 1;
	} while (mask);

	/* leave the DI line low when done */
	eecd &= ~E1000_EECD_DI;
	ew32(EECD, eecd);
}
76
77
78
79
80
81
82
83
84
85
86
87
/**
 *  e1000_shift_in_eec_bits - Shift data bits in from the EEPROM
 *  @hw: pointer to the HW structure
 *  @count: number of bits to shift in
 *
 *  Clocks in @count bits from the EEPROM, most significant bit first, by
 *  raising the clock and sampling the DO line on each cycle.
 *
 *  Return: the bits read, packed into the low @count bits of a u16.
 **/
static u16 e1000_shift_in_eec_bits(struct e1000_hw *hw, u16 count)
{
	u32 eecd;
	u32 i;
	u16 data;

	eecd = er32(EECD);
	eecd &= ~(E1000_EECD_DO | E1000_EECD_DI);
	data = 0;

	for (i = 0; i < count; i++) {
		data <<= 1;
		e1000_raise_eec_clk(hw, &eecd);

		/* re-read EECD to sample the DO bit the EEPROM drives */
		eecd = er32(EECD);

		eecd &= ~E1000_EECD_DI;
		if (eecd & E1000_EECD_DO)
			data |= 1;

		e1000_lower_eec_clk(hw, &eecd);
	}

	return data;
}
113
114
115
116
117
118
119
120
121
122 s32 e1000e_poll_eerd_eewr_done(struct e1000_hw *hw, int ee_reg)
123 {
124 u32 attempts = 100000;
125 u32 i, reg = 0;
126
127 for (i = 0; i < attempts; i++) {
128 if (ee_reg == E1000_NVM_POLL_READ)
129 reg = er32(EERD);
130 else
131 reg = er32(EEWR);
132
133 if (reg & E1000_NVM_RW_REG_DONE)
134 return 0;
135
136 udelay(5);
137 }
138
139 return -E1000_ERR_NVM;
140 }
141
142
143
144
145
146
147
148
149
150 s32 e1000e_acquire_nvm(struct e1000_hw *hw)
151 {
152 u32 eecd = er32(EECD);
153 s32 timeout = E1000_NVM_GRANT_ATTEMPTS;
154
155 ew32(EECD, eecd | E1000_EECD_REQ);
156 eecd = er32(EECD);
157
158 while (timeout) {
159 if (eecd & E1000_EECD_GNT)
160 break;
161 udelay(5);
162 eecd = er32(EECD);
163 timeout--;
164 }
165
166 if (!timeout) {
167 eecd &= ~E1000_EECD_REQ;
168 ew32(EECD, eecd);
169 e_dbg("Could not acquire NVM grant\n");
170 return -E1000_ERR_NVM;
171 }
172
173 return 0;
174 }
175
176
177
178
179
180
181
/**
 *  e1000_standby_nvm - Return EEPROM to standby state
 *  @hw: pointer to the HW structure
 *
 *  For SPI EEPROMs, pulses the chip-select line high then low (with the
 *  NVM delay on each edge) to terminate the current command and return
 *  the device to standby.  No-op for other NVM types.
 **/
static void e1000_standby_nvm(struct e1000_hw *hw)
{
	struct e1000_nvm_info *nvm = &hw->nvm;
	u32 eecd = er32(EECD);

	if (nvm->type == e1000_nvm_eeprom_spi) {
		/* toggle CS to flush commands */
		eecd |= E1000_EECD_CS;
		ew32(EECD, eecd);
		e1e_flush();
		udelay(nvm->delay_usec);
		eecd &= ~E1000_EECD_CS;
		ew32(EECD, eecd);
		e1e_flush();
		udelay(nvm->delay_usec);
	}
}
199
200
201
202
203
204
205
/**
 *  e1000_stop_nvm - Terminate EEPROM command
 *  @hw: pointer to the HW structure
 *
 *  Terminates the current SPI EEPROM command by raising chip-select while
 *  driving the clock low.  No-op for other NVM types.
 **/
static void e1000_stop_nvm(struct e1000_hw *hw)
{
	u32 eecd;

	eecd = er32(EECD);
	if (hw->nvm.type == e1000_nvm_eeprom_spi) {
		/* pull CS high while lowering SK */
		eecd |= E1000_EECD_CS;
		e1000_lower_eec_clk(hw, &eecd);
	}
}
217
218
219
220
221
222
223
/**
 *  e1000e_release_nvm - Release exclusive access to EEPROM
 *  @hw: pointer to the HW structure
 *
 *  Stops any in-progress EEPROM command, then clears the REQ bit in the
 *  EECD register so other agents may access the NVM.
 **/
void e1000e_release_nvm(struct e1000_hw *hw)
{
	u32 eecd;

	e1000_stop_nvm(hw);

	eecd = er32(EECD);
	eecd &= ~E1000_EECD_REQ;
	ew32(EECD, eecd);
}
234
235
236
237
238
239
240
/**
 *  e1000_ready_nvm_eeprom - Prepares EEPROM for read/write
 *  @hw: pointer to the HW structure
 *
 *  Sets up the EEPROM for reading and writing.  For SPI parts, clears the
 *  clock and chip-select lines, then repeatedly issues the RDSR (read
 *  status register) opcode until the RDY/busy bit clears or the retry
 *  count is exhausted.
 *
 *  Return: 0 on success, -E1000_ERR_NVM if the device never became ready.
 **/
static s32 e1000_ready_nvm_eeprom(struct e1000_hw *hw)
{
	struct e1000_nvm_info *nvm = &hw->nvm;
	u32 eecd = er32(EECD);
	u8 spi_stat_reg;

	if (nvm->type == e1000_nvm_eeprom_spi) {
		u16 timeout = NVM_MAX_RETRY_SPI;

		/* Clear SK and CS */
		eecd &= ~(E1000_EECD_CS | E1000_EECD_SK);
		ew32(EECD, eecd);
		e1e_flush();
		udelay(1);

		/* Read the status register until NVM_STATUS_RDY_SPI is
		 * clear; the EEPROM reports busy while an internal write
		 * cycle is still in progress, so retry with a standby
		 * toggle between attempts.
		 */
		while (timeout) {
			e1000_shift_out_eec_bits(hw, NVM_RDSR_OPCODE_SPI,
						 hw->nvm.opcode_bits);
			spi_stat_reg = (u8)e1000_shift_in_eec_bits(hw, 8);
			if (!(spi_stat_reg & NVM_STATUS_RDY_SPI))
				break;

			udelay(5);
			e1000_standby_nvm(hw);
			timeout--;
		}

		if (!timeout) {
			e_dbg("SPI NVM Status error\n");
			return -E1000_ERR_NVM;
		}
	}

	return 0;
}
281
282
283
284
285
286
287
288
289
290
291 s32 e1000e_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
292 {
293 struct e1000_nvm_info *nvm = &hw->nvm;
294 u32 i, eerd = 0;
295 s32 ret_val = 0;
296
297
298
299
300 if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
301 (words == 0)) {
302 e_dbg("nvm parameter(s) out of bounds\n");
303 return -E1000_ERR_NVM;
304 }
305
306 for (i = 0; i < words; i++) {
307 eerd = ((offset + i) << E1000_NVM_RW_ADDR_SHIFT) +
308 E1000_NVM_RW_REG_START;
309
310 ew32(EERD, eerd);
311 ret_val = e1000e_poll_eerd_eewr_done(hw, E1000_NVM_POLL_READ);
312 if (ret_val) {
313 e_dbg("NVM read error: %d\n", ret_val);
314 break;
315 }
316
317 data[i] = (er32(EERD) >> E1000_NVM_RW_REG_DATA);
318 }
319
320 return ret_val;
321 }
322
323
324
325
326
327
328
329
330
331
332
333
334
/**
 *  e1000e_write_nvm_spi - Write to EEPROM using SPI
 *  @hw: pointer to the HW structure
 *  @offset: offset (word index) within the EEPROM to be written to
 *  @words: number of words to write
 *  @data: 16 bit word(s) to be written to the EEPROM
 *
 *  Writes data to EEPROM at offset using the SPI interface, one EEPROM
 *  page per acquire/release cycle.  Note: unless the checksum word is
 *  subsequently rewritten (see e1000e_update_nvm_checksum_generic), the
 *  EEPROM checksum will no longer validate.
 *
 *  Return: 0 on success, negative error code on bad parameters or on
 *  acquire/ready failure.
 **/
s32 e1000e_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
{
	struct e1000_nvm_info *nvm = &hw->nvm;
	s32 ret_val = -E1000_ERR_NVM;
	u16 widx = 0;

	/* reject out-of-range offsets, zero-length writes, and writes that
	 * would run past the end of the NVM
	 */
	if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
	    (words == 0)) {
		e_dbg("nvm parameter(s) out of bounds\n");
		return -E1000_ERR_NVM;
	}

	while (widx < words) {
		u8 write_opcode = NVM_WRITE_OPCODE_SPI;

		ret_val = nvm->ops.acquire(hw);
		if (ret_val)
			return ret_val;

		ret_val = e1000_ready_nvm_eeprom(hw);
		if (ret_val) {
			nvm->ops.release(hw);
			return ret_val;
		}

		e1000_standby_nvm(hw);

		/* Send the WRITE ENABLE command (opcode_bits wide) */
		e1000_shift_out_eec_bits(hw, NVM_WREN_OPCODE_SPI,
					 nvm->opcode_bits);

		e1000_standby_nvm(hw);

		/* Some SPI EEPROMs with 8 address bits embed the 9th
		 * (A8) address bit in the opcode itself
		 */
		if ((nvm->address_bits == 8) && (offset >= 128))
			write_opcode |= NVM_A8_OPCODE_SPI;

		/* Send the Write command: opcode followed by the byte
		 * address (word offset * 2)
		 */
		e1000_shift_out_eec_bits(hw, write_opcode, nvm->opcode_bits);
		e1000_shift_out_eec_bits(hw, (u16)((offset + widx) * 2),
					 nvm->address_bits);

		/* Stream words until the end of the data or of the
		 * current EEPROM page, whichever comes first
		 */
		while (widx < words) {
			u16 word_out = data[widx];

			/* swap bytes: the EEPROM expects LSB first */
			word_out = (word_out >> 8) | (word_out << 8);
			e1000_shift_out_eec_bits(hw, word_out, 16);
			widx++;

			/* page boundary: a fresh write command (with a new
			 * address) is required for the next page
			 */
			if ((((offset + widx) * 2) % nvm->page_size) == 0) {
				e1000_standby_nvm(hw);
				break;
			}
		}
		/* allow the internal page-write cycle to complete */
		usleep_range(10000, 11000);
		nvm->ops.release(hw);
	}

	return ret_val;
}
401
402
403
404
405
406
407
408
409
410
411 s32 e1000_read_pba_string_generic(struct e1000_hw *hw, u8 *pba_num,
412 u32 pba_num_size)
413 {
414 s32 ret_val;
415 u16 nvm_data;
416 u16 pba_ptr;
417 u16 offset;
418 u16 length;
419
420 if (pba_num == NULL) {
421 e_dbg("PBA string buffer was null\n");
422 return -E1000_ERR_INVALID_ARGUMENT;
423 }
424
425 ret_val = e1000_read_nvm(hw, NVM_PBA_OFFSET_0, 1, &nvm_data);
426 if (ret_val) {
427 e_dbg("NVM Read Error\n");
428 return ret_val;
429 }
430
431 ret_val = e1000_read_nvm(hw, NVM_PBA_OFFSET_1, 1, &pba_ptr);
432 if (ret_val) {
433 e_dbg("NVM Read Error\n");
434 return ret_val;
435 }
436
437
438
439
440
441 if (nvm_data != NVM_PBA_PTR_GUARD) {
442 e_dbg("NVM PBA number is not stored as string\n");
443
444
445 if (pba_num_size < E1000_PBANUM_LENGTH) {
446 e_dbg("PBA string buffer too small\n");
447 return E1000_ERR_NO_SPACE;
448 }
449
450
451 pba_num[0] = (nvm_data >> 12) & 0xF;
452 pba_num[1] = (nvm_data >> 8) & 0xF;
453 pba_num[2] = (nvm_data >> 4) & 0xF;
454 pba_num[3] = nvm_data & 0xF;
455 pba_num[4] = (pba_ptr >> 12) & 0xF;
456 pba_num[5] = (pba_ptr >> 8) & 0xF;
457 pba_num[6] = '-';
458 pba_num[7] = 0;
459 pba_num[8] = (pba_ptr >> 4) & 0xF;
460 pba_num[9] = pba_ptr & 0xF;
461
462
463 pba_num[10] = '\0';
464
465
466 for (offset = 0; offset < 10; offset++) {
467 if (pba_num[offset] < 0xA)
468 pba_num[offset] += '0';
469 else if (pba_num[offset] < 0x10)
470 pba_num[offset] += 'A' - 0xA;
471 }
472
473 return 0;
474 }
475
476 ret_val = e1000_read_nvm(hw, pba_ptr, 1, &length);
477 if (ret_val) {
478 e_dbg("NVM Read Error\n");
479 return ret_val;
480 }
481
482 if (length == 0xFFFF || length == 0) {
483 e_dbg("NVM PBA number section invalid length\n");
484 return -E1000_ERR_NVM_PBA_SECTION;
485 }
486
487 if (pba_num_size < (((u32)length * 2) - 1)) {
488 e_dbg("PBA string buffer too small\n");
489 return -E1000_ERR_NO_SPACE;
490 }
491
492
493 pba_ptr++;
494 length--;
495
496 for (offset = 0; offset < length; offset++) {
497 ret_val = e1000_read_nvm(hw, pba_ptr + offset, 1, &nvm_data);
498 if (ret_val) {
499 e_dbg("NVM Read Error\n");
500 return ret_val;
501 }
502 pba_num[offset * 2] = (u8)(nvm_data >> 8);
503 pba_num[(offset * 2) + 1] = (u8)(nvm_data & 0xFF);
504 }
505 pba_num[offset * 2] = '\0';
506
507 return 0;
508 }
509
510
511
512
513
514
515
516
517
518 s32 e1000_read_mac_addr_generic(struct e1000_hw *hw)
519 {
520 u32 rar_high;
521 u32 rar_low;
522 u16 i;
523
524 rar_high = er32(RAH(0));
525 rar_low = er32(RAL(0));
526
527 for (i = 0; i < E1000_RAL_MAC_ADDR_LEN; i++)
528 hw->mac.perm_addr[i] = (u8)(rar_low >> (i * 8));
529
530 for (i = 0; i < E1000_RAH_MAC_ADDR_LEN; i++)
531 hw->mac.perm_addr[i + 4] = (u8)(rar_high >> (i * 8));
532
533 for (i = 0; i < ETH_ALEN; i++)
534 hw->mac.addr[i] = hw->mac.perm_addr[i];
535
536 return 0;
537 }
538
539
540
541
542
543
544
545
546 s32 e1000e_validate_nvm_checksum_generic(struct e1000_hw *hw)
547 {
548 s32 ret_val;
549 u16 checksum = 0;
550 u16 i, nvm_data;
551
552 for (i = 0; i < (NVM_CHECKSUM_REG + 1); i++) {
553 ret_val = e1000_read_nvm(hw, i, 1, &nvm_data);
554 if (ret_val) {
555 e_dbg("NVM Read Error\n");
556 return ret_val;
557 }
558 checksum += nvm_data;
559 }
560
561 if (checksum != (u16)NVM_SUM) {
562 e_dbg("NVM Checksum Invalid\n");
563 return -E1000_ERR_NVM;
564 }
565
566 return 0;
567 }
568
569
570
571
572
573
574
575
576
577 s32 e1000e_update_nvm_checksum_generic(struct e1000_hw *hw)
578 {
579 s32 ret_val;
580 u16 checksum = 0;
581 u16 i, nvm_data;
582
583 for (i = 0; i < NVM_CHECKSUM_REG; i++) {
584 ret_val = e1000_read_nvm(hw, i, 1, &nvm_data);
585 if (ret_val) {
586 e_dbg("NVM Read Error while updating checksum.\n");
587 return ret_val;
588 }
589 checksum += nvm_data;
590 }
591 checksum = (u16)NVM_SUM - checksum;
592 ret_val = e1000_write_nvm(hw, NVM_CHECKSUM_REG, 1, &checksum);
593 if (ret_val)
594 e_dbg("NVM Write Error while updating checksum.\n");
595
596 return ret_val;
597 }
598
599
600
601
602
603
604
605
/**
 *  e1000e_reload_nvm_generic - Reloads EEPROM
 *  @hw: pointer to the HW structure
 *
 *  Reloads the EEPROM by setting the EE_RST bit in the extended control
 *  register, after a short settling delay.
 **/
void e1000e_reload_nvm_generic(struct e1000_hw *hw)
{
	u32 ctrl_ext;

	usleep_range(10, 20);
	ctrl_ext = er32(CTRL_EXT);
	ctrl_ext |= E1000_CTRL_EXT_EE_RST;
	ew32(CTRL_EXT, ctrl_ext);
	e1e_flush();
}